drm/vmwgfx: Make vmwgfx dma buffers prime aware
Should we need to share dma buffers using prime, let's make them prime aware.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
parent 79e5f81003
commit c486d4f894
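What "prime aware" buys: once the buffer objects are backed by TTM prime objects, a vmwgfx buffer handle can be turned into a dma-buf file descriptor and back through the generic DRM prime ioctls. A minimal userspace sketch of that round trip using libdrm; `drm_fd` and `handle` are assumed to exist already (e.g. from a buffer allocation ioctl), and the error handling is illustrative only:

/* Sketch: export a buffer handle as a dma-buf fd and re-import it.
 * Assumes `drm_fd` is an open vmwgfx DRM node and `handle` was obtained
 * elsewhere; not the driver's internal code, just the generic prime API. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

static int share_buffer(int drm_fd, uint32_t handle)
{
        int prime_fd;
        uint32_t imported;

        /* Handle -> dma-buf fd; only permitted if the object is shareable
         * or owned by this client. */
        if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &prime_fd))
                return -1;

        /* The fd can be passed to another process or driver; importing it
         * back on the same device returns a handle to the same object. */
        if (drmPrimeFDToHandle(drm_fd, prime_fd, &imported)) {
                close(prime_fd);
                return -1;
        }

        printf("exported handle %u as fd %d, re-imported as handle %u\n",
               handle, prime_fd, imported);
        close(prime_fd);
        return 0;
}

The sketch builds against libdrm, e.g. cc sketch.c $(pkg-config --cflags --libs libdrm).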
@@ -35,7 +35,7 @@
 #define VMW_RES_EVICT_ERR_COUNT 10
 
 struct vmw_user_dma_buffer {
-        struct ttm_base_object base;
+        struct ttm_prime_object prime;
         struct vmw_dma_buffer dma;
 };
 
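Because the base object is now reached through the embedded prime object, every container_of() that used to name the base member has to name prime.base instead, as the hunks in vmw_user_dmabuf_release() and vmw_user_dmabuf_lookup() below show. A minimal userspace sketch of why the nested member designator works; the struct layouts here are illustrative stand-ins, not the real TTM/vmwgfx definitions:

/* Stand-in types to demonstrate container_of() with a nested member. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_base_object { int refcount; };                        /* stand-in */
struct ttm_prime_object { struct ttm_base_object base; int fd; };/* stand-in */
struct vmw_user_dma_buffer { struct ttm_prime_object prime; int dma; };

int main(void)
{
        struct vmw_user_dma_buffer user_bo = { 0 };
        struct ttm_base_object *base = &user_bo.prime.base;

        /* With the extra level of embedding, the lookup must name the
         * nested member "prime.base" instead of plain "base". */
        struct vmw_user_dma_buffer *found =
                container_of(base, struct vmw_user_dma_buffer, prime.base);

        printf("recovered wrapper: %s\n", found == &user_bo ? "yes" : "no");
        return 0;
}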
@@ -387,7 +387,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
         struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 
-        ttm_base_object_kfree(vmw_user_bo, base);
+        ttm_prime_object_kfree(vmw_user_bo, prime);
 }
 
 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -401,7 +401,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
         if (unlikely(base == NULL))
                 return;
 
-        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+                                   prime.base);
         bo = &vmw_user_bo->dma.base;
         ttm_bo_unref(&bo);
 }
@@ -442,18 +443,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                 return ret;
 
         tmp = ttm_bo_reference(&user_bo->dma.base);
-        ret = ttm_base_object_init(tfile,
-                                   &user_bo->base,
-                                   shareable,
-                                   ttm_buffer_type,
-                                   &vmw_user_dmabuf_release, NULL);
+        ret = ttm_prime_object_init(tfile,
+                                    size,
+                                    &user_bo->prime,
+                                    shareable,
+                                    ttm_buffer_type,
+                                    &vmw_user_dmabuf_release, NULL);
         if (unlikely(ret != 0)) {
                 ttm_bo_unref(&tmp);
                 goto out_no_base_object;
         }
 
         *p_dma_buf = &user_bo->dma;
-        *handle = user_bo->base.hash.key;
+        *handle = user_bo->prime.base.hash.key;
 
 out_no_base_object:
         return ret;
@@ -475,8 +477,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                 return -EPERM;
 
         vmw_user_bo = vmw_user_dma_buffer(bo);
-        return (vmw_user_bo->base.tfile == tfile ||
-                vmw_user_bo->base.shareable) ? 0 : -EPERM;
+        return (vmw_user_bo->prime.base.tfile == tfile ||
+                vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 }
 
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -538,14 +540,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                 return -ESRCH;
         }
 
-        if (unlikely(base->object_type != ttm_buffer_type)) {
+        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                 ttm_base_object_unref(&base);
                 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                        (unsigned long)handle);
                 return -EINVAL;
         }
 
-        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+                                   prime.base);
         (void)ttm_bo_reference(&vmw_user_bo->dma.base);
         ttm_base_object_unref(&base);
         *out = &vmw_user_bo->dma;
@@ -562,7 +565,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                 return -EINVAL;
 
         user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
-        return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+        return ttm_ref_object_add(tfile, &user_bo->prime.base,
+                                  TTM_REF_USAGE, NULL);
 }
 
 /*
@@ -807,15 +811,16 @@ int vmw_dumb_create(struct drm_file *file_priv,
                 goto out_no_dmabuf;
 
         tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-        ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
-                                   &vmw_user_bo->base,
-                                   false,
-                                   ttm_buffer_type,
-                                   &vmw_user_dmabuf_release, NULL);
+        ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
+                                    args->size,
+                                    &vmw_user_bo->prime,
+                                    false,
+                                    ttm_buffer_type,
+                                    &vmw_user_dmabuf_release, NULL);
         if (unlikely(ret != 0))
                 goto out_no_base_object;
 
-        args->handle = vmw_user_bo->base.hash.key;
+        args->handle = vmw_user_bo->prime.base.hash.key;
 
 out_no_base_object:
         ttm_bo_unref(&tmp);
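vmw_dumb_create() implements the generic dumb-buffer ioctl, so after this change handles created that way are backed by prime objects as well. A userspace sketch of obtaining such a handle (the device path and buffer dimensions are arbitrary example values); subject to the driver's shareable checks, the handle could then be exported with drmPrimeHandleToFD() as in the earlier sketch:

/* Sketch: allocate a dumb buffer and read back the handle the driver
 * assigns. Width/height/bpp and the node path are illustrative only. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
        if (fd < 0)
                return 1;

        struct drm_mode_create_dumb create;
        memset(&create, 0, sizeof(create));
        create.width = 640;
        create.height = 480;
        create.bpp = 32;

        /* The driver fills in handle, pitch and size on success. */
        if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) == 0)
                printf("dumb buffer handle %u, size %llu\n",
                       create.handle, (unsigned long long)create.size);

        close(fd);
        return 0;
}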