2013-03-22 22:34:08 +08:00
|
|
|
/*
|
|
|
|
* NVIDIA Tegra DRM GEM helper functions
|
|
|
|
*
|
|
|
|
* Copyright (C) 2012 Sascha Hauer, Pengutronix
|
|
|
|
* Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
|
|
|
|
*
|
|
|
|
* Based on the GEM/CMA helpers
|
|
|
|
*
|
|
|
|
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
|
|
|
|
*
|
2014-02-11 22:52:01 +08:00
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
2013-03-22 22:34:08 +08:00
|
|
|
*/
|
|
|
|
|
2013-12-12 17:00:43 +08:00
|
|
|
#include <linux/dma-buf.h>
|
2014-06-27 03:41:53 +08:00
|
|
|
#include <linux/iommu.h>
|
2013-10-05 04:34:01 +08:00
|
|
|
#include <drm/tegra_drm.h>
|
|
|
|
|
2014-07-11 14:29:14 +08:00
|
|
|
#include "drm.h"
|
2013-03-22 22:34:08 +08:00
|
|
|
#include "gem.h"
|
|
|
|
|
2013-09-24 22:34:05 +08:00
|
|
|
/* Convert an embedded host1x_bo back to its containing tegra_bo. */
static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
{
	return container_of(bo, struct tegra_bo, base);
}
|
|
|
|
|
|
|
|
static void tegra_bo_put(struct host1x_bo *bo)
|
|
|
|
{
|
2013-09-24 22:34:05 +08:00
|
|
|
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
|
2013-03-22 22:34:08 +08:00
|
|
|
|
2015-11-23 17:32:48 +08:00
|
|
|
drm_gem_object_unreference_unlocked(&obj->gem);
|
2013-03-22 22:34:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
|
|
|
|
{
|
2013-09-24 22:34:05 +08:00
|
|
|
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
|
2013-03-22 22:34:08 +08:00
|
|
|
|
|
|
|
return obj->paddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* host1x_bo_ops .unpin: no-op because tegra_bo_pin() acquires nothing. */
static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
{
}
|
|
|
|
|
|
|
|
static void *tegra_bo_mmap(struct host1x_bo *bo)
|
|
|
|
{
|
2013-09-24 22:34:05 +08:00
|
|
|
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
|
2013-03-22 22:34:08 +08:00
|
|
|
|
|
|
|
return obj->vaddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* host1x_bo_ops .munmap: no-op, tegra_bo_mmap() creates no mapping state. */
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
}
|
|
|
|
|
|
|
|
static void *tegra_bo_kmap(struct host1x_bo *bo, unsigned int page)
|
|
|
|
{
|
2013-09-24 22:34:05 +08:00
|
|
|
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
|
2013-03-22 22:34:08 +08:00
|
|
|
|
|
|
|
return obj->vaddr + page * PAGE_SIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* host1x_bo_ops .kunmap: no-op counterpart to tegra_bo_kmap(). */
static void tegra_bo_kunmap(struct host1x_bo *bo, unsigned int page,
			    void *addr)
{
}
|
|
|
|
|
|
|
|
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
|
|
|
|
{
|
2013-09-24 22:34:05 +08:00
|
|
|
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
|
2013-03-22 22:34:08 +08:00
|
|
|
|
|
|
|
drm_gem_object_reference(&obj->gem);
|
|
|
|
|
|
|
|
return bo;
|
|
|
|
}
|
|
|
|
|
2013-12-12 17:10:46 +08:00
|
|
|
/* host1x buffer-object operations backed by Tegra GEM objects. */
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
	.kmap = tegra_bo_kmap,
	.kunmap = tegra_bo_kunmap,
};
|
|
|
|
|
2014-06-27 03:41:53 +08:00
|
|
|
/*
 * Map a buffer object into the Tegra DRM IOMMU domain.
 *
 * Carves an I/O virtual address range out of tegra->mm, then maps the
 * object's scatter-gather list at that address. On success bo->paddr holds
 * the IOVA and bo->size the number of bytes actually mapped (the return
 * value of iommu_map_sg()).
 *
 * Returns 0 on success, -EBUSY if the object is already mapped, -ENOMEM if
 * the drm_mm node cannot be allocated, or a negative error from address
 * space allocation / IOMMU mapping.
 */
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	/* a non-NULL node means this object was mapped before */
	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	/* iommu_map_sg() returns the number of bytes mapped */
	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}
|
|
|
|
|
|
|
|
/*
 * Undo tegra_bo_iommu_map(): unmap the buffer from the IOMMU domain and
 * release its IOVA range. Safe to call on an object that was never mapped
 * (bo->mm == NULL). Always returns 0.
 */
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}
|
|
|
|
|
2014-10-16 20:18:50 +08:00
|
|
|
/*
 * Allocate and minimally initialize a tegra_bo: host1x bookkeeping, GEM
 * object of page-rounded @size, and an mmap offset. No backing storage is
 * allocated here — see tegra_bo_alloc() / tegra_bo_import().
 *
 * Returns the new object or an ERR_PTR() on failure; all partially
 * constructed state is torn down on the error paths.
 */
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}
|
|
|
|
|
2014-06-27 03:41:53 +08:00
|
|
|
/*
 * Release a buffer object's backing storage. Handles both allocation
 * strategies: discrete pages obtained via drm_gem_get_pages() (IOMMU path)
 * or a contiguous write-combined DMA allocation (non-IOMMU path).
 */
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		/* mark pages dirty and accessed before dropping them */
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
	}
}
|
|
|
|
|
2014-12-16 23:41:47 +08:00
|
|
|
/*
 * Back a buffer object with discrete pages (used when an IOMMU domain is
 * available). Allocates the pages, builds an sg_table for them and flushes
 * them for device access.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	/* gem.size was page-rounded in tegra_bo_alloc_object() */
	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflash_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(bo->sgt);
}
|
|
|
|
|
2014-12-16 23:41:47 +08:00
|
|
|
/*
 * Allocate backing storage for a buffer object. With an IOMMU domain,
 * discrete pages are allocated and mapped through the IOMMU; otherwise a
 * physically contiguous write-combined DMA buffer is used.
 *
 * Returns 0 on success or a negative error code; on failure no backing
 * storage remains allocated.
 */
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		/* __GFP_NOWARN: the failure is reported via dev_err() below */
		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}
|
|
|
|
|
2014-11-03 20:23:02 +08:00
|
|
|
/*
 * Create a fully backed Tegra buffer object of @size bytes.
 *
 * @flags may contain DRM_TEGRA_GEM_CREATE_TILED and/or
 * DRM_TEGRA_GEM_CREATE_BOTTOM_UP, which are translated into the object's
 * tiling mode and flags.
 *
 * Returns the new object or an ERR_PTR() on failure.
 */
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
|
|
|
|
|
|
|
|
/*
 * Create a buffer object and register a userspace handle for it on @file.
 * On success *handle holds the new handle, which owns the sole reference;
 * the local reference is dropped before returning, so the returned pointer
 * is only valid while the handle keeps the object alive.
 */
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size,
					     unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	/* drop the creation reference; the handle now holds the object */
	drm_gem_object_unreference_unlocked(&bo->gem);

	return bo;
}
|
|
|
|
|
2014-05-13 22:46:11 +08:00
|
|
|
/*
 * Import a foreign dma-buf as a Tegra buffer object.
 *
 * Attaches to the dma-buf, maps its scatter-gather list and then either
 * maps it through the IOMMU or, without an IOMMU, requires the buffer to
 * be physically contiguous (a single sg entry) and uses its DMA address
 * directly.
 *
 * Returns the new object or an ERR_PTR(); all intermediate state (sgt
 * mapping, attachment, dma-buf reference, GEM object) is unwound on error.
 */
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	/* hold a reference on the dma-buf for the lifetime of the import */
	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	} else {
		/* without an IOMMU only contiguous buffers can be used */
		if (bo->sgt->nents > 1) {
			err = -EINVAL;
			goto detach;
		}

		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	/* bo->sgt may hold an ERR_PTR if the mapping itself failed */
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
|
|
|
|
|
2013-03-22 22:34:08 +08:00
|
|
|
/*
 * GEM free callback: tear down a Tegra buffer object once its last
 * reference is gone. Unmaps it from the IOMMU (if mapped), then releases
 * either the dma-buf import state or the locally allocated backing storage.
 */
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		/* imported buffer: undo the attachment, not our allocation */
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}
|
|
|
|
|
|
|
|
/*
 * DRM dumb-buffer create ioctl: compute a hardware-aligned pitch and total
 * size from the requested width/height/bpp, then create a buffer object
 * with a userspace handle. Fills in args->pitch, args->size and
 * args->handle. Returns 0 or a negative error code.
 */
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	/* round the pitch up to the device's alignment requirement */
	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}
|
|
|
|
|
|
|
|
/*
 * DRM dumb-buffer map ioctl: look up the object for @handle and return its
 * fake mmap offset in *offset. Returns 0 on success or -EINVAL if the
 * handle does not resolve to a GEM object.
 */
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
			     u32 handle, u64 *offset)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem) {
		dev_err(drm->dev, "failed to lookup GEM object\n");
		return -EINVAL;
	}

	bo = to_tegra_bo(gem);

	*offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	/* drop the reference taken by the lookup */
	drm_gem_object_unreference_unlocked(gem);

	return 0;
}
|
|
|
|
|
2014-06-27 03:41:53 +08:00
|
|
|
/*
 * Page-fault handler for mmap'ed buffer objects backed by discrete pages.
 * Inserts the page corresponding to the faulting address into the VMA.
 * Buffers without a pages array (contiguous DMA allocations) are mapped up
 * front in tegra_drm_mmap(), so faulting on them signals SIGBUS.
 */
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;
	int err;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	/* page index of the faulting address within the mapping */
	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (err) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		return VM_FAULT_NOPAGE;

	case -ENOMEM:
		return VM_FAULT_OOM;
	}

	return VM_FAULT_SIGBUS;
}
|
|
|
|
|
2013-03-22 22:34:08 +08:00
|
|
|
/* VMA operations for userspace mappings of Tegra buffer objects. */
const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
|
|
|
|
|
|
|
|
/*
 * File-operations mmap entry point. Delegates to drm_gem_mmap() for lookup
 * and VMA setup, then finishes the mapping depending on the backing:
 * contiguous DMA buffers are mapped immediately via dma_mmap_wc(), while
 * page-backed buffers are set up for on-demand faulting through
 * tegra_bo_fault().
 */
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int ret;

	ret = drm_gem_mmap(file, vma);
	if (ret)
		return ret;

	gem = vma->vm_private_data;
	bo = to_tegra_bo(gem);

	if (!bo->pages) {
		/*
		 * dma_mmap_wc() interprets vm_pgoff as an offset into the
		 * buffer, so temporarily clear it and restore the original
		 * (fake-offset) value afterwards.
		 */
		unsigned long vm_pgoff = vma->vm_pgoff;

		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		ret = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
				  gem->size);
		if (ret) {
			drm_gem_vm_close(vma);
			return ret;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		/* pages are inserted one at a time from the fault handler */
		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
|
2013-12-12 17:00:43 +08:00
|
|
|
|
|
|
|
/*
 * dma_buf_ops .map_dma_buf: build and DMA-map an sg_table describing the
 * exported buffer for the attaching device. Page-backed objects get one
 * entry per page (then dma_map_sg'd); contiguous objects get a single
 * entry carrying the DMA address directly.
 *
 * Returns the mapped sg_table or NULL on failure (this era of the API
 * used NULL rather than ERR_PTR here).
 */
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		struct scatterlist *sg;
		unsigned int i;

		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
			goto free;

		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);

		/* dma_map_sg() returns 0 on mapping failure */
		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free;
	} else {
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free;

		sg_dma_address(sgt->sgl) = bo->paddr;
		sg_dma_len(sgt->sgl) = gem->size;
	}

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
|
|
|
|
|
|
|
|
/*
 * dma_buf_ops .unmap_dma_buf: undo tegra_gem_prime_map_dma_buf(). Only
 * page-backed buffers were DMA-mapped, so only they need dma_unmap_sg().
 */
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}
|
|
|
|
|
|
|
|
/* dma_buf_ops .release: delegate to the generic GEM dma-buf release. */
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}
|
|
|
|
|
|
|
|
/* dma_buf_ops .kmap_atomic: not supported for exported Tegra buffers. */
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
					 unsigned long page)
{
	return NULL;
}
|
|
|
|
|
|
|
|
/* dma_buf_ops .kunmap_atomic: no-op, kmap_atomic never maps anything. */
static void tegra_gem_prime_kunmap_atomic(struct dma_buf *buf,
					  unsigned long page,
					  void *addr)
{
}
|
|
|
|
|
|
|
|
/* dma_buf_ops .kmap: not supported for exported Tegra buffers. */
static void *tegra_gem_prime_kmap(struct dma_buf *buf, unsigned long page)
{
	return NULL;
}
|
|
|
|
|
|
|
|
/* dma_buf_ops .kunmap: no-op, kmap never maps anything. */
static void tegra_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
				   void *addr)
{
}
|
|
|
|
|
|
|
|
/* dma_buf_ops .mmap: userspace mapping through the dma-buf is rejected. */
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
|
|
|
|
|
2014-01-30 03:32:33 +08:00
|
|
|
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
|
|
|
|
{
|
|
|
|
struct drm_gem_object *gem = buf->priv;
|
|
|
|
struct tegra_bo *bo = to_tegra_bo(gem);
|
|
|
|
|
|
|
|
return bo->vaddr;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* dma_buf_ops .vunmap: no-op, vmap hands out a persistent mapping. */
static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
{
}
|
|
|
|
|
2013-12-12 17:00:43 +08:00
|
|
|
/* dma-buf operations for buffers exported by the Tegra DRM driver. */
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.kmap_atomic = tegra_gem_prime_kmap_atomic,
	.kunmap_atomic = tegra_gem_prime_kunmap_atomic,
	.kmap = tegra_gem_prime_kmap,
	.kunmap = tegra_gem_prime_kunmap,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
|
|
|
|
|
|
|
|
/*
 * PRIME export: wrap a GEM object in a dma-buf using the driver's dma-buf
 * ops. Returns the new dma-buf or an ERR_PTR() from the DRM export helper.
 */
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
				       struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(drm, &exp_info);
}
|
|
|
|
|
|
|
|
/*
 * PRIME import: turn a dma-buf into a GEM object. If the dma-buf was
 * exported by this same device, short-circuit by taking a reference on
 * the original GEM object instead of re-importing it; otherwise perform
 * a full import via tegra_bo_import().
 */
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		/* self-import: reuse the existing object */
		if (gem->dev == drm) {
			drm_gem_object_reference(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}
|