drm/exynos: add userptr feature for g2d module
This patch adds the userptr feature to the G2D module. A userptr is a user space address allocated by malloc(), and the purpose of this feature is to let G2D's DMA access such user space regions.

To use this feature, user space should OR the G2D_BUF_USERPTR flag into the offset variable of struct drm_exynos_g2d_cmd, fill a struct drm_exynos_g2d_userptr with the user space address and its size, and then store a pointer to that drm_exynos_g2d_userptr object in the data variable of struct drm_exynos_g2d_cmd. The last bit of the offset variable tells the driver whether the cmdlist's buffer type is userptr or not. If it is userptr, the g2d driver reads the user space address and size and then gets the backing pages through get_user_pages(); otherwise the value is treated as a GEM handle.

Below is sample code:

    static void set_cmd(struct drm_exynos_g2d_cmd *cmd,
                        unsigned long offset, unsigned long data)
    {
        cmd->offset = offset;
        cmd->data = data;
    }

    static int solid_fill_test(int x, int y, unsigned long userptr)
    {
        struct drm_exynos_g2d_cmd cmd_gem[5];
        struct drm_exynos_g2d_userptr g2d_userptr;
        unsigned int gem_nr = 0;
        ...
        g2d_userptr.userptr = userptr;
        g2d_userptr.size = x * y * 4;

        set_cmd(&cmd_gem[gem_nr++],
                DST_BASE_ADDR_REG | G2D_BUF_USERPTR,
                (unsigned long)&g2d_userptr);
        ...
    }

    int main(int argc, char **argv)
    {
        unsigned long addr;
        ...
        /* cast needed: malloc() returns void *, addr is unsigned long */
        addr = (unsigned long)malloc(x * y * 4);
        ...
        solid_fill_test(x, y, addr);
        ...
    }

Next, those pages are mapped through the IOMMU table and the resulting device address is written into the cmdlist so that G2D's DMA can access it. Note that pages obtained with get_user_pages() are pinned: they can be neither migrated nor swapped out, so the DMA access is safe.

The userptr path has performance overhead, however, so this patch also adds a memory pool for it. Consider a user that sends a cmdlist with a userptr and size on every submission; without a pool, get_user_pages() would be called every time. The pool holds up to 64MB, and every userptr the user has sent is kept in it, so if an incoming userptr matches one already in the pool, the device address of the pooled entry is written into the cmdlist instead of pinning the pages again.

Finally, the pages from get_user_pages() are freed once the user calls free() and the DMA access has completed. get_user_pages() leaves the pages with two reference counts when the user process has never touched the malloc()'d region before. When the user then calls free(), the page reference count drops to 1, and the driver's put_page() call drops it to 0 (the reverse order holds as well). This is how the pages backing the buffer are used by the DMA and eventually freed.

This patch is based on "drm/exynos: add iommu support for g2d":
    https://patchwork.kernel.org/patch/1629481/

Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
commit 2a3098ff6c
parent 1119707e22
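For illustration, a buffer list like cmd_gem above reaches the driver through the set_cmdlist ioctl. The following is a minimal sketch, not part of the patch; it assumes an already-open DRM fd, the cmd/cmd_nr and cmd_gem/gem_nr arrays from the sample, and libdrm's drmIoctl():

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <drm/exynos_drm.h>

    static int submit_cmdlist(int fd,
                              struct drm_exynos_g2d_cmd *cmd, uint32_t cmd_nr,
                              struct drm_exynos_g2d_cmd *cmd_buf, uint32_t buf_nr)
    {
        struct drm_exynos_g2d_set_cmdlist req;

        memset(&req, 0, sizeof(req));
        req.cmd = (uint64_t)(uintptr_t)cmd;         /* plain register writes */
        req.cmd_nr = cmd_nr;
        req.cmd_buf = (uint64_t)(uintptr_t)cmd_buf; /* base-address entries */
        req.cmd_buf_nr = buf_nr;
        req.event_type = G2D_EVENT_NOT;

        /* the driver validates offsets, resolves GEM handles and
         * userptr descriptors, and queues the cmdlist */
        return drmIoctl(fd, DRM_IOCTL_EXYNOS_G2D_SET_CMDLIST, &req);
    }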
@@ -231,8 +231,7 @@ struct exynos_drm_g2d_private {
 	struct device		*dev;
 	struct list_head	inuse_cmdlist;
 	struct list_head	event_list;
-	struct list_head	gem_list;
-	unsigned int		gem_nr;
+	struct list_head	userptr_list;
 };
 
 struct drm_exynos_file_private {
@@ -97,11 +97,19 @@
 
 #define MAX_BUF_ADDR_NR		6
 
+/* maximum buffer pool size of userptr is 64MB as default */
+#define MAX_POOL		(64 * 1024 * 1024)
+
+enum {
+	BUF_TYPE_GEM = 1,
+	BUF_TYPE_USERPTR,
+};
+
 /* cmdlist data structure */
 struct g2d_cmdlist {
-	u32	head;
-	u32	data[G2D_CMDLIST_DATA_NUM];
-	u32	last;	/* last data offset */
+	u32		head;
+	unsigned long	data[G2D_CMDLIST_DATA_NUM];
+	u32		last;	/* last data offset */
 };
 
 struct drm_exynos_pending_g2d_event {
@@ -109,11 +117,26 @@ struct drm_exynos_pending_g2d_event {
 	struct drm_exynos_g2d_event	event;
 };
 
+struct g2d_cmdlist_userptr {
+	struct list_head	list;
+	dma_addr_t		dma_addr;
+	unsigned long		userptr;
+	unsigned long		size;
+	struct page		**pages;
+	unsigned int		npages;
+	struct sg_table		*sgt;
+	struct vm_area_struct	*vma;
+	atomic_t		refcount;
+	bool			in_pool;
+	bool			out_of_list;
+};
+
 struct g2d_cmdlist_node {
 	struct list_head	list;
 	struct g2d_cmdlist	*cmdlist;
 	unsigned int		map_nr;
-	unsigned int		handles[MAX_BUF_ADDR_NR];
+	unsigned long		handles[MAX_BUF_ADDR_NR];
+	unsigned int		obj_type[MAX_BUF_ADDR_NR];
 	dma_addr_t		dma_addr;
 
 	struct drm_exynos_pending_g2d_event	*event;
@@ -152,6 +175,9 @@ struct g2d_data {
 	struct list_head	runqueue;
 	struct mutex		runqueue_mutex;
 	struct kmem_cache	*runqueue_slab;
+
+	unsigned long		current_pool;
+	unsigned long		max_pool;
 };
 
 static int g2d_init_cmdlist(struct g2d_data *g2d)
@@ -256,6 +282,229 @@ static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
 		list_add_tail(&node->event->base.link, &g2d_priv->event_list);
 }
 
+static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
+					unsigned long obj,
+					bool force)
+{
+	struct g2d_cmdlist_userptr *g2d_userptr =
+					(struct g2d_cmdlist_userptr *)obj;
+
+	if (!obj)
+		return;
+
+	if (force)
+		goto out;
+
+	atomic_dec(&g2d_userptr->refcount);
+
+	if (atomic_read(&g2d_userptr->refcount) > 0)
+		return;
+
+	if (g2d_userptr->in_pool)
+		return;
+
+out:
+	exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
+					DMA_BIDIRECTIONAL);
+
+	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+					g2d_userptr->npages,
+					g2d_userptr->vma);
+
+	if (!g2d_userptr->out_of_list)
+		list_del_init(&g2d_userptr->list);
+
+	sg_free_table(g2d_userptr->sgt);
+	kfree(g2d_userptr->sgt);
+	g2d_userptr->sgt = NULL;
+
+	kfree(g2d_userptr->pages);
+	g2d_userptr->pages = NULL;
+	kfree(g2d_userptr);
+	g2d_userptr = NULL;
+}
+
+dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+					unsigned long userptr,
+					unsigned long size,
+					struct drm_file *filp,
+					unsigned long *obj)
+{
+	struct drm_exynos_file_private *file_priv = filp->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct g2d_cmdlist_userptr *g2d_userptr;
+	struct g2d_data *g2d;
+	struct page **pages;
+	struct sg_table *sgt;
+	struct vm_area_struct *vma;
+	unsigned long start, end;
+	unsigned int npages, offset;
+	int ret;
+
+	if (!size) {
+		DRM_ERROR("invalid userptr size.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	g2d = dev_get_drvdata(g2d_priv->dev);
+
+	/* check if userptr already exists in userptr_list. */
+	list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+		if (g2d_userptr->userptr == userptr) {
+			/*
+			 * also check size because there could be same address
+			 * and different size.
+			 */
+			if (g2d_userptr->size == size) {
+				atomic_inc(&g2d_userptr->refcount);
+				*obj = (unsigned long)g2d_userptr;
+
+				return &g2d_userptr->dma_addr;
+			}
+
+			/*
+			 * at this moment, maybe g2d dma is accessing this
+			 * g2d_userptr memory region so just remove this
+			 * g2d_userptr object from userptr_list not to be
+			 * referred again and also except it from the userptr
+			 * pool to be released after the dma access completion.
+			 */
+			g2d_userptr->out_of_list = true;
+			g2d_userptr->in_pool = false;
+			list_del_init(&g2d_userptr->list);
+
+			break;
+		}
+	}
+
+	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
+	if (!g2d_userptr) {
+		DRM_ERROR("failed to allocate g2d_userptr.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	atomic_set(&g2d_userptr->refcount, 1);
+
+	start = userptr & PAGE_MASK;
+	offset = userptr & ~PAGE_MASK;
+	end = PAGE_ALIGN(userptr + size);
+	npages = (end - start) >> PAGE_SHIFT;
+	g2d_userptr->npages = npages;
+
+	pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages) {
+		DRM_ERROR("failed to allocate pages.\n");
+		kfree(g2d_userptr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	vma = find_vma(current->mm, userptr);
+	if (!vma) {
+		DRM_ERROR("failed to get vm region.\n");
+		ret = -EFAULT;
+		goto err_free_pages;
+	}
+
+	if (vma->vm_end < userptr + size) {
+		DRM_ERROR("vma is too small.\n");
+		ret = -EFAULT;
+		goto err_free_pages;
+	}
+
+	g2d_userptr->vma = exynos_gem_get_vma(vma);
+	if (!g2d_userptr->vma) {
+		DRM_ERROR("failed to copy vma.\n");
+		ret = -ENOMEM;
+		goto err_free_pages;
+	}
+
+	g2d_userptr->size = size;
+
+	ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
+						npages, pages, vma);
+	if (ret < 0) {
+		DRM_ERROR("failed to get user pages from userptr.\n");
+		goto err_put_vma;
+	}
+
+	g2d_userptr->pages = pages;
+
+	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		DRM_ERROR("failed to allocate sg table.\n");
+		ret = -ENOMEM;
+		goto err_free_userptr;
+	}
+
+	ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
+					size, GFP_KERNEL);
+	if (ret < 0) {
+		DRM_ERROR("failed to get sgt from pages.\n");
+		goto err_free_sgt;
+	}
+
+	g2d_userptr->sgt = sgt;
+
+	ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
+						DMA_BIDIRECTIONAL);
+	if (ret < 0) {
+		DRM_ERROR("failed to map sgt with dma region.\n");
+		goto err_free_sgt;
+	}
+
+	g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
+	g2d_userptr->userptr = userptr;
+
+	list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+
+	if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
+		g2d->current_pool += npages << PAGE_SHIFT;
+		g2d_userptr->in_pool = true;
+	}
+
+	*obj = (unsigned long)g2d_userptr;
+
+	return &g2d_userptr->dma_addr;
+
+err_free_sgt:
+	sg_free_table(sgt);
+	kfree(sgt);
+	sgt = NULL;
+
+err_free_userptr:
+	exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
+					g2d_userptr->npages,
+					g2d_userptr->vma);
+
+err_put_vma:
+	exynos_gem_put_vma(g2d_userptr->vma);
+
+err_free_pages:
+	kfree(pages);
+	kfree(g2d_userptr);
+	pages = NULL;
+	g2d_userptr = NULL;
+
+	return ERR_PTR(ret);
+}
+
+static void g2d_userptr_free_all(struct drm_device *drm_dev,
+					struct g2d_data *g2d,
+					struct drm_file *filp)
+{
+	struct drm_exynos_file_private *file_priv = filp->driver_priv;
+	struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+	struct g2d_cmdlist_userptr *g2d_userptr, *n;
+
+	list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+		if (g2d_userptr->in_pool)
+			g2d_userptr_put_dma_addr(drm_dev,
+						(unsigned long)g2d_userptr,
+						true);
+
+	g2d->current_pool = 0;
+}
+
 static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
 				struct g2d_cmdlist_node *node,
 				struct drm_device *drm_dev,
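As a sanity check on the pinning arithmetic in g2d_userptr_get_dma_addr() above, here is a worked example with 4 KiB pages; the concrete numbers are illustrative only:

    /*
     * userptr = 0x12345678, size = 0x2000 (8 KiB), PAGE_SIZE = 4 KiB
     *
     * start  = userptr & PAGE_MASK         = 0x12345000
     * offset = userptr & ~PAGE_MASK        = 0x678
     * end    = PAGE_ALIGN(userptr + size)  = 0x12348000
     * npages = (end - start) >> PAGE_SHIFT = 3
     *
     * i.e. an unaligned 8 KiB request spans, and therefore pins, three
     * pages; 'offset' is passed to sg_alloc_table_from_pages() so the
     * sg list starts at the exact byte the user asked for.
     */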
@@ -272,10 +521,31 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
 		offset = cmdlist->last - (i * 2 + 1);
 		handle = cmdlist->data[offset];
 
-		addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file);
-		if (IS_ERR(addr)) {
-			node->map_nr = i;
-			return -EFAULT;
+		if (node->obj_type[i] == BUF_TYPE_GEM) {
+			addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
+								file);
+			if (IS_ERR(addr)) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
+		} else {
+			struct drm_exynos_g2d_userptr g2d_userptr;
+
+			if (copy_from_user(&g2d_userptr, (void __user *)handle,
+				sizeof(struct drm_exynos_g2d_userptr))) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
+
+			addr = g2d_userptr_get_dma_addr(drm_dev,
+							g2d_userptr.userptr,
+							g2d_userptr.size,
+							file,
+							&handle);
+			if (IS_ERR(addr)) {
+				node->map_nr = i;
+				return -EFAULT;
+			}
 		}
 
 		cmdlist->data[offset] = *addr;
@@ -293,9 +563,14 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
 	int i;
 
 	for (i = 0; i < node->map_nr; i++) {
-		unsigned int handle = node->handles[i];
+		unsigned long handle = node->handles[i];
 
-		exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle, filp);
+		if (node->obj_type[i] == BUF_TYPE_GEM)
+			exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
+							filp);
+		else
+			g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
+							false);
 
 		node->handles[i] = 0;
 	}
@@ -438,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
+static int g2d_check_reg_offset(struct device *dev,
+				struct g2d_cmdlist_node *node,
 				int nr, bool for_addr)
 {
+	struct g2d_cmdlist *cmdlist = node->cmdlist;
 	int reg_offset;
 	int index;
 	int i;
 
 	for (i = 0; i < nr; i++) {
 		index = cmdlist->last - 2 * (i + 1);
 
+		if (for_addr) {
+			/* check userptr buffer type. */
+			reg_offset = (cmdlist->data[index] &
+					~0x7fffffff) >> 31;
+			if (reg_offset) {
+				node->obj_type[i] = BUF_TYPE_USERPTR;
+				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
+			}
+		}
+
 		reg_offset = cmdlist->data[index] & ~0xfffff000;
 
 		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
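The `(cmdlist->data[index] & ~0x7fffffff) >> 31` expression above just isolates bit 31 of the offset word, i.e. G2D_BUF_USERPTR. A more direct formulation, shown only as a sketch for the 32-bit platforms this driver targets and not part of the patch, would be:

    if (cmdlist->data[index] & G2D_BUF_USERPTR) {
        node->obj_type[i] = BUF_TYPE_USERPTR;
        cmdlist->data[index] &= ~G2D_BUF_USERPTR;
    }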
@@ -463,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
 		case G2D_MSK_BASE_ADDR:
 			if (!for_addr)
 				goto err;
+
+			if (node->obj_type[i] != BUF_TYPE_USERPTR)
+				node->obj_type[i] = BUF_TYPE_GEM;
 			break;
 		default:
 			if (for_addr)
@@ -474,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
 	return 0;
 
 err:
-	dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]);
+	dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
 	return -EINVAL;
 }
 
@@ -574,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	}
 
 	/* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
-	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2;
+	size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
 	if (size > G2D_CMDLIST_DATA_NUM) {
 		dev_err(dev, "cmdlist size is too big\n");
 		ret = -EINVAL;
@@ -591,25 +882,25 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 	}
 	cmdlist->last += req->cmd_nr * 2;
 
-	ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false);
+	ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
 	if (ret < 0)
 		goto err_free_event;
 
-	node->map_nr = req->cmd_gem_nr;
-	if (req->cmd_gem_nr) {
-		struct drm_exynos_g2d_cmd *cmd_gem;
+	node->map_nr = req->cmd_buf_nr;
+	if (req->cmd_buf_nr) {
+		struct drm_exynos_g2d_cmd *cmd_buf;
 
-		cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem;
+		cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
 
 		if (copy_from_user(cmdlist->data + cmdlist->last,
-					(void __user *)cmd_gem,
-					sizeof(*cmd_gem) * req->cmd_gem_nr)) {
+					(void __user *)cmd_buf,
+					sizeof(*cmd_buf) * req->cmd_buf_nr)) {
 			ret = -EFAULT;
 			goto err_free_event;
 		}
-		cmdlist->last += req->cmd_gem_nr * 2;
+		cmdlist->last += req->cmd_buf_nr * 2;
 
-		ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true);
+		ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
 		if (ret < 0)
 			goto err_free_event;
 
@@ -759,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
 
 	INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
 	INIT_LIST_HEAD(&g2d_priv->event_list);
-	INIT_LIST_HEAD(&g2d_priv->gem_list);
+	INIT_LIST_HEAD(&g2d_priv->userptr_list);
 
 	return 0;
 }
@@ -793,6 +1084,9 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
 	}
 	mutex_unlock(&g2d->cmdlist_mutex);
 
+	/* release all g2d_userptr in pool. */
+	g2d_userptr_free_all(drm_dev, g2d, file);
+
 	kfree(file_priv->g2d_priv);
 }
 
@@ -863,6 +1157,8 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 		goto err_put_clk;
 	}
 
+	g2d->max_pool = MAX_POOL;
+
 	platform_set_drvdata(pdev, g2d);
 
 	subdrv = &g2d->subdrv;
@@ -448,6 +448,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+	struct vm_area_struct *vma_copy;
+
+	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+	if (!vma_copy)
+		return NULL;
+
+	if (vma->vm_ops && vma->vm_ops->open)
+		vma->vm_ops->open(vma);
+
+	if (vma->vm_file)
+		get_file(vma->vm_file);
+
+	memcpy(vma_copy, vma, sizeof(*vma));
+
+	vma_copy->vm_mm = NULL;
+	vma_copy->vm_next = NULL;
+	vma_copy->vm_prev = NULL;
+
+	return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+	if (!vma)
+		return;
+
+	if (vma->vm_ops && vma->vm_ops->close)
+		vma->vm_ops->close(vma);
+
+	if (vma->vm_file)
+		fput(vma->vm_file);
+
+	kfree(vma);
+}
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+						unsigned int npages,
+						struct page **pages,
+						struct vm_area_struct *vma)
+{
+	int get_npages;
+
+	/* the memory region mmaped with VM_PFNMAP. */
+	if (vma_is_io(vma)) {
+		unsigned int i;
+
+		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+			unsigned long pfn;
+			int ret = follow_pfn(vma, start, &pfn);
+			if (ret)
+				return ret;
+
+			pages[i] = pfn_to_page(pfn);
+		}
+
+		if (i != npages) {
+			DRM_ERROR("failed to get user_pages.\n");
+			return -EINVAL;
+		}
+
+		return 0;
+	}
+
+	get_npages = get_user_pages(current, current->mm, start,
+					npages, 1, 1, pages, NULL);
+	get_npages = max(get_npages, 0);
+	if (get_npages != npages) {
+		DRM_ERROR("failed to get user_pages.\n");
+		while (get_npages)
+			put_page(pages[--get_npages]);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+					unsigned int npages,
+					struct vm_area_struct *vma)
+{
+	if (!vma_is_io(vma)) {
+		unsigned int i;
+
+		for (i = 0; i < npages; i++) {
+			set_page_dirty_lock(pages[i]);
+
+			/*
+			 * undo the reference we took when populating
+			 * the table.
+			 */
+			put_page(pages[i]);
+		}
+	}
+}
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir)
+{
+	int nents;
+
+	mutex_lock(&drm_dev->struct_mutex);
+
+	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+	if (!nents) {
+		DRM_ERROR("failed to map sgl with dma.\n");
+		mutex_unlock(&drm_dev->struct_mutex);
+		return nents;
+	}
+
+	mutex_unlock(&drm_dev->struct_mutex);
+	return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+					struct sg_table *sgt,
+					enum dma_data_direction dir)
+{
+	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
+
 int exynos_drm_gem_init_object(struct drm_gem_object *obj)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);
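The helpers above form matched pairs: get_vma/put_vma, get_pages/put_pages, map_sgt/unmap_sgt. A condensed sketch of the intended call order, with all allocation and error handling elided (g2d_userptr_get_dma_addr() in exynos_drm_g2d.c is the real in-tree caller):

    /* pin + map; assumes userptr, npages, pages[] and sgt are prepared */
    vma = exynos_gem_get_vma(find_vma(current->mm, userptr));
    exynos_gem_get_pages_from_userptr(userptr & PAGE_MASK, npages, pages, vma);
    sg_alloc_table_from_pages(sgt, pages, npages, offset, size, GFP_KERNEL);
    exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);

    /* ... G2D DMA runs against sgt->sgl[0].dma_address ... */

    /* unmap + unpin, in reverse order */
    exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_BIDIRECTIONAL);
    exynos_gem_put_pages_to_userptr(pages, npages, vma);
    sg_free_table(sgt);
    exynos_gem_put_vma(vma);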
@@ -35,22 +35,29 @@
  * exynos drm gem buffer structure.
  *
  * @kvaddr: kernel virtual address to allocated memory region.
+ * @userptr: user space address.
  * @dma_addr: bus address(accessed by dma) to allocated memory region.
  *	- this address could be physical address without IOMMU and
  *	device address with IOMMU.
+ * @write: whether pages will be written to by the caller.
  * @sgt: sg table to transfer page data.
  * @pages: contain all pages to allocated memory region.
  * @page_size: could be 4K, 64K or 1MB.
  * @size: size of allocated memory region.
+ * @pfnmap: indicate whether memory region from userptr is mmaped with
+ *	VM_PFNMAP or not.
  */
 struct exynos_drm_gem_buf {
 	void __iomem		*kvaddr;
+	unsigned long		userptr;
 	dma_addr_t		dma_addr;
 	struct dma_attrs	dma_attrs;
+	unsigned int		write;
 	struct sg_table		*sgt;
 	struct page		**pages;
 	unsigned long		page_size;
 	unsigned long		size;
+	bool			pfnmap;
 };
 
 /*
@@ -66,6 +73,7 @@ struct exynos_drm_gem_buf {
  *	or at framebuffer creation.
  * @size: size requested from user, in bytes and this size is aligned
  *	in page unit.
+ * @vma: a pointer to vm_area.
  * @flags: indicate memory type to allocated buffer and cache attruibute.
  *
  * P.S. this object would be transfered to user as kms_bo.handle so
@@ -75,6 +83,7 @@ struct exynos_drm_gem_obj {
 	struct drm_gem_object		base;
 	struct exynos_drm_gem_buf	*buffer;
 	unsigned long			size;
+	struct vm_area_struct		*vma;
 	unsigned int			flags;
 };
 
@@ -129,6 +138,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
 int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 
+/* map user space allocated by malloc to pages. */
+int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+
 /* get buffer information to memory region allocated by gem. */
 int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
@@ -164,4 +177,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 /* set vm_flags and we can change the vm attribute to other one at here. */
 int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
+static inline int vma_is_io(struct vm_area_struct *vma)
+{
+	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
+}
+
+/* get a copy of a virtual memory region. */
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
+
+/* release a userspace virtual memory area. */
+void exynos_gem_put_vma(struct vm_area_struct *vma);
+
+/* get pages from user space. */
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+						unsigned int npages,
+						struct page **pages,
+						struct vm_area_struct *vma);
+
+/* drop the reference to pages. */
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+					unsigned int npages,
+					struct vm_area_struct *vma);
+
+/* map sgt with dma region. */
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir);
+
+/* unmap sgt from dma region. */
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+				struct sg_table *sgt,
+				enum dma_data_direction dir);
+
 #endif
@@ -133,17 +133,26 @@ struct drm_exynos_g2d_cmd {
 	__u32	data;
 };
 
+enum drm_exynos_g2d_buf_type {
+	G2D_BUF_USERPTR = 1 << 31,
+};
+
 enum drm_exynos_g2d_event_type {
 	G2D_EVENT_NOT,
 	G2D_EVENT_NONSTOP,
 	G2D_EVENT_STOP,		/* not yet */
 };
 
+struct drm_exynos_g2d_userptr {
+	unsigned long userptr;
+	unsigned long size;
+};
+
 struct drm_exynos_g2d_set_cmdlist {
 	__u64					cmd;
-	__u64					cmd_gem;
+	__u64					cmd_buf;
 	__u32					cmd_nr;
-	__u32					cmd_gem_nr;
+	__u32					cmd_buf_nr;
 
 	/* for g2d event */
 	__u64					event_type;
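Putting the new uapi types together, one entry of the renamed cmd_buf array that refers to a malloc()'d buffer would be filled roughly as follows; buf, size and nr are illustrative names, not from the patch:

    struct drm_exynos_g2d_userptr up;

    up.userptr = (unsigned long)buf;   /* malloc()'d, at least 'size' bytes */
    up.size = size;

    /* bit 31 of 'offset' (G2D_BUF_USERPTR) marks 'data' as a pointer to
     * a drm_exynos_g2d_userptr rather than a GEM handle */
    cmd_buf[nr].offset = DST_BASE_ADDR_REG | G2D_BUF_USERPTR;
    cmd_buf[nr].data = (unsigned long)&up;
    nr++;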