2008-06-29 18:18:46 +08:00
|
|
|
/*
|
|
|
|
* Coherent per-device memory handling.
|
|
|
|
* Borrowed from i386
|
|
|
|
*/
|
2016-03-23 05:28:03 +08:00
|
|
|
#include <linux/io.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities to include
those headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the following.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
widely available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build tests were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2008-06-29 18:18:46 +08:00
|
|
|
#include <linux/kernel.h>
|
2011-07-02 04:07:32 +08:00
|
|
|
#include <linux/module.h>
|
2008-06-29 18:18:46 +08:00
|
|
|
#include <linux/dma-mapping.h>
|
|
|
|
|
|
|
|
/*
 * Descriptor for one per-device coherent memory pool, carved out of a
 * physically contiguous region and handed out page-at-a-time via a bitmap.
 */
struct dma_coherent_mem {
	void *virt_base;		/* kernel mapping of the pool */
	dma_addr_t device_base;		/* bus address of the pool's first page */
	unsigned long pfn_base;		/* host PFN of the pool's first page */
	int size;			/* pool size in pages (not bytes) */
	int flags;			/* DMA_MEMORY_* declaration flags */
	unsigned long *bitmap;		/* one bit per page; set = in use */
	spinlock_t spinlock;		/* protects bitmap manipulation */
	bool use_dev_dma_pfn_offset;	/* honour dev->dma_pfn_offset for base */
};
|
|
|
|
|
2017-06-26 17:18:58 +08:00
|
|
|
/* Optional system-wide fallback pool, set up from the "linux,dma-default"
 * reserved-memory region at core_initcall time; NULL if none was declared. */
static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
|
|
|
|
|
|
|
|
static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
|
|
|
|
{
|
|
|
|
if (dev && dev->dma_mem)
|
|
|
|
return dev->dma_mem;
|
2017-07-20 18:19:58 +08:00
|
|
|
return NULL;
|
2017-06-26 17:18:58 +08:00
|
|
|
}
|
|
|
|
|
2017-06-26 17:18:57 +08:00
|
|
|
static inline dma_addr_t dma_get_device_base(struct device *dev,
|
|
|
|
struct dma_coherent_mem * mem)
|
|
|
|
{
|
|
|
|
if (mem->use_dev_dma_pfn_offset)
|
|
|
|
return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
|
|
|
|
else
|
|
|
|
return mem->device_base;
|
|
|
|
}
|
|
|
|
|
2016-01-05 05:36:40 +08:00
|
|
|
/*
 * Map a physical region and build a dma_coherent_mem descriptor for it.
 *
 * @phys_addr:   CPU physical address of the region
 * @device_addr: bus address of the same region as seen by the device
 * @size:        region size in bytes (must be non-zero)
 * @flags:       DMA_MEMORY_* flags; exactly one of MAP/IO selects the
 *               mapping primitive used below
 * @mem:         on success, receives the newly allocated descriptor
 *
 * Returns true on success, false on any failure (all partial state is
 * unwound via the single "out" label).  Ownership of *mem transfers to
 * the caller, who releases it with dma_release_coherent_memory().
 */
static bool dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	/* Caller must request at least one of the two mapping styles. */
	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;

	/* MAP pools get a cacheable-WC kernel mapping; IO pools an ioremap. */
	if (flags & DMA_MEMORY_MAP)
		mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	else
		mem_base = ioremap(phys_addr, size);
	if (!mem_base)
		goto out;

	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem)
		goto out;
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap)
		goto out;

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return true;

out:
	/* kfree(NULL) is a no-op, so this is safe on every failure path. */
	kfree(dma_mem);
	if (mem_base) {
		/* Undo whichever mapping style was established above. */
		if (flags & DMA_MEMORY_MAP)
			memunmap(mem_base);
		else
			iounmap(mem_base);
	}
	return false;
}
|
2014-10-14 06:51:07 +08:00
|
|
|
|
|
|
|
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
|
|
|
|
{
|
|
|
|
if (!mem)
|
|
|
|
return;
|
2016-03-23 05:28:03 +08:00
|
|
|
|
|
|
|
if (mem->flags & DMA_MEMORY_MAP)
|
|
|
|
memunmap(mem->virt_base);
|
|
|
|
else
|
|
|
|
iounmap(mem->virt_base);
|
2014-10-14 06:51:07 +08:00
|
|
|
kfree(mem->bitmap);
|
|
|
|
kfree(mem);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int dma_assign_coherent_memory(struct device *dev,
|
|
|
|
struct dma_coherent_mem *mem)
|
|
|
|
{
|
2017-06-26 17:18:58 +08:00
|
|
|
if (!dev)
|
|
|
|
return -ENODEV;
|
|
|
|
|
2014-10-14 06:51:07 +08:00
|
|
|
if (dev->dma_mem)
|
|
|
|
return -EBUSY;
|
|
|
|
|
|
|
|
dev->dma_mem = mem;
|
|
|
|
/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
|
|
|
|
dma_addr_t device_addr, size_t size, int flags)
|
|
|
|
{
|
|
|
|
struct dma_coherent_mem *mem;
|
|
|
|
|
2016-01-05 05:36:40 +08:00
|
|
|
if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
|
|
|
|
&mem))
|
2014-10-14 06:51:07 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (dma_assign_coherent_memory(dev, mem) == 0)
|
2016-01-05 05:36:40 +08:00
|
|
|
return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;
|
2014-10-14 06:51:07 +08:00
|
|
|
|
|
|
|
dma_release_coherent_memory(mem);
|
|
|
|
return 0;
|
|
|
|
}
|
2008-06-29 18:18:46 +08:00
|
|
|
EXPORT_SYMBOL(dma_declare_coherent_memory);
|
|
|
|
|
|
|
|
void dma_release_declared_memory(struct device *dev)
|
|
|
|
{
|
|
|
|
struct dma_coherent_mem *mem = dev->dma_mem;
|
|
|
|
|
|
|
|
if (!mem)
|
|
|
|
return;
|
2014-10-14 06:51:07 +08:00
|
|
|
dma_release_coherent_memory(mem);
|
2008-06-29 18:18:46 +08:00
|
|
|
dev->dma_mem = NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dma_release_declared_memory);
|
|
|
|
|
|
|
|
/*
 * Reserve a sub-range of @dev's declared coherent pool so the generic
 * allocator will never hand it out.
 *
 * @device_addr: bus address of the range to occupy (need not be page aligned;
 *               the sub-page offset is folded into @size below)
 * @size:        length in bytes
 *
 * Returns the kernel virtual address of the occupied range, or an
 * ERR_PTR on failure (-EINVAL when the device has no pool, or the error
 * from bitmap_allocate_region() when the range is already taken).
 */
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	/* Grow the request to cover the intra-page offset of device_addr. */
	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	/* Page index of the range within the pool, device-relative. */
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
|
|
|
|
|
2017-07-20 18:19:58 +08:00
|
|
|
/*
 * Allocate @size bytes (rounded up to a power-of-two page order) from
 * pool @mem.
 *
 * On success, stores the bus address in *@dma_handle and returns the
 * kernel virtual address of zeroed memory; returns NULL when the pool
 * cannot satisfy the request.
 */
static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	int dma_memory_map;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	/* Reject requests larger than the whole pool up front. */
	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	/* Snapshot the flag so zeroing can happen outside the lock. */
	dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	/* IO-mapped pools must be cleared with the __iomem-safe variant. */
	if (dma_memory_map)
		memset(ret, 0, size);
	else
		memset_io(ret, 0, size);

	return ret;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
|
|
|
|
* @dev: device from which we allocate memory
|
|
|
|
* @size: size of requested memory area
|
|
|
|
* @dma_handle: This will be filled with the correct dma handle
|
|
|
|
* @ret: This pointer will be filled with the virtual address
|
|
|
|
* to allocated area.
|
|
|
|
*
|
|
|
|
* This function should be only called from per-arch dma_alloc_coherent()
|
|
|
|
* to support allocation from per-device coherent memory pools.
|
|
|
|
*
|
|
|
|
* Returns 0 if dma_alloc_coherent should continue with allocating from
|
|
|
|
* generic memory areas, or !0 if dma_alloc_coherent should return @ret.
|
|
|
|
*/
|
|
|
|
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
|
|
|
|
dma_addr_t *dma_handle, void **ret)
|
|
|
|
{
|
|
|
|
struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
|
|
|
|
|
|
|
|
if (!mem)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
|
|
|
|
if (*ret)
|
|
|
|
return 1;
|
|
|
|
|
2009-01-21 17:51:53 +08:00
|
|
|
/*
|
|
|
|
* In the case where the allocation can not be satisfied from the
|
|
|
|
* per-device area, try to fall back to generic memory if the
|
|
|
|
* constraints allow it.
|
|
|
|
*/
|
|
|
|
return mem->flags & DMA_MEMORY_EXCLUSIVE;
|
2008-06-29 18:18:46 +08:00
|
|
|
}
|
2017-07-20 18:19:58 +08:00
|
|
|
EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
|
2008-06-29 18:18:46 +08:00
|
|
|
|
2017-07-20 18:19:58 +08:00
|
|
|
/*
 * Allocate from the system-wide default coherent pool, if one was
 * declared via device tree; returns NULL otherwise.
 */
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
	struct dma_coherent_mem *mem = dma_coherent_default_memory;

	if (!mem)
		return NULL;

	return __dma_alloc_from_coherent(mem, size, dma_handle);
}
|
2008-06-29 18:18:46 +08:00
|
|
|
|
2017-07-20 18:19:58 +08:00
|
|
|
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
|
|
|
|
int order, void *vaddr)
|
|
|
|
{
|
2008-06-29 18:18:46 +08:00
|
|
|
if (mem && vaddr >= mem->virt_base && vaddr <
|
|
|
|
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
|
|
|
|
int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
|
2014-10-14 06:51:07 +08:00
|
|
|
unsigned long flags;
|
2008-06-29 18:18:46 +08:00
|
|
|
|
2014-10-14 06:51:07 +08:00
|
|
|
spin_lock_irqsave(&mem->spinlock, flags);
|
2008-06-29 18:18:46 +08:00
|
|
|
bitmap_release_region(mem->bitmap, page, order);
|
2014-10-14 06:51:07 +08:00
|
|
|
spin_unlock_irqrestore(&mem->spinlock, flags);
|
2008-06-29 18:18:46 +08:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2012-03-23 20:05:14 +08:00
|
|
|
|
|
|
|
/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev: device from which the memory was allocated
 * @order: the order of pages allocated
 * @vaddr: virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *pool = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(pool, order, vaddr);
}
EXPORT_SYMBOL(dma_release_from_dev_coherent);
|
|
|
|
|
|
|
|
int dma_release_from_global_coherent(int order, void *vaddr)
|
|
|
|
{
|
|
|
|
if (!dma_coherent_default_memory)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return __dma_release_from_coherent(dma_coherent_default_memory, order,
|
|
|
|
vaddr);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Map a range of pool @mem into userspace via @vma.
 *
 * Returns 1 when [vaddr, vaddr+size) lies inside the pool (in which case
 * *@ret holds the remap_pfn_range() result, or -ENXIO when the vma's
 * offset/length do not fit the buffer), and 0 when the range is not from
 * this pool so the caller should try the generic mmap path.
 */
static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		/* Page index of the buffer within the pool. */
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		/* The vma's window must fit entirely inside the buffer. */
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
|
2017-07-20 18:19:58 +08:00
|
|
|
|
|
|
|
/**
|
|
|
|
* dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
|
|
|
|
* @dev: device from which the memory was allocated
|
|
|
|
* @vma: vm_area for the userspace memory
|
|
|
|
* @vaddr: cpu address returned by dma_alloc_from_dev_coherent
|
|
|
|
* @size: size of the memory buffer allocated
|
|
|
|
* @ret: result from remap_pfn_range()
|
|
|
|
*
|
|
|
|
* This checks whether the memory was allocated from the per-device
|
|
|
|
* coherent memory pool and if so, maps that memory to the provided vma.
|
|
|
|
*
|
|
|
|
* Returns 1 if we correctly mapped the memory, or 0 if the caller should
|
|
|
|
* proceed with mapping memory from generic pools.
|
|
|
|
*/
|
|
|
|
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
|
|
|
|
void *vaddr, size_t size, int *ret)
|
|
|
|
{
|
|
|
|
struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
|
|
|
|
|
|
|
|
return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
|
|
|
|
|
|
|
|
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
|
|
|
|
size_t size, int *ret)
|
|
|
|
{
|
|
|
|
if (!dma_coherent_default_memory)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
|
|
|
|
vaddr, size, ret);
|
|
|
|
}
|
2014-10-14 06:51:07 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Support for reserved memory regions defined in device tree
|
|
|
|
*/
|
|
|
|
#ifdef CONFIG_OF_RESERVED_MEM
|
|
|
|
#include <linux/of.h>
|
|
|
|
#include <linux/of_fdt.h>
|
|
|
|
#include <linux/of_reserved_mem.h>
|
|
|
|
|
2017-06-26 17:18:58 +08:00
|
|
|
/* Reserved-memory region tagged "linux,dma-default" in the device tree,
 * remembered by rmem_dma_setup() and consumed at core_initcall time. */
static struct reserved_mem *dma_reserved_default_memory __initdata;
|
|
|
|
|
2014-10-14 06:51:07 +08:00
|
|
|
/*
 * reserved_mem_ops.device_init hook: lazily create the coherent pool for
 * a "shared-dma-pool" region (cached in rmem->priv so subsequent devices
 * share it) and attach it to @dev.
 *
 * Note: the dma_assign_coherent_memory() result is deliberately ignored;
 * dma_init_reserved_memory() relies on that for its NULL-device call.
 */
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;

	/* First user of this region creates the pool; later users reuse it. */
	if (!mem &&
	    !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
				      DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
				      &mem)) {
		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
			&rmem->base, (unsigned long)rmem->size / SZ_1M);
		return -ENODEV;
	}
	/* Device-tree pools translate through each device's DMA PFN offset. */
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}
|
|
|
|
|
|
|
|
static void rmem_dma_device_release(struct reserved_mem *rmem,
|
|
|
|
struct device *dev)
|
|
|
|
{
|
2017-06-26 17:18:58 +08:00
|
|
|
if (dev)
|
|
|
|
dev->dma_mem = NULL;
|
2014-10-14 06:51:07 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Callbacks the reserved-memory core invokes when a device is bound to or
 * unbound from a "shared-dma-pool" region. */
static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init = rmem_dma_device_init,
	.device_release = rmem_dma_device_release,
};
|
|
|
|
|
|
|
|
/*
 * Early (flat device tree) setup for a "shared-dma-pool" reserved-memory
 * node: validate its properties and install rmem_dma_ops.
 *
 * Returns 0 on success or -EINVAL for unsupported property combinations.
 */
static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	/* "reusable" regions belong to CMA, not this simple allocator. */
	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	/* ARM cannot yet remap memory that is still in the linear map. */
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	/* NOTE(review): the default-pool property is only honoured on ARM
	 * here (it sits inside the CONFIG_ARM block) — confirm intended. */
	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
|
2017-06-26 17:18:58 +08:00
|
|
|
|
|
|
|
/*
 * core_initcall: turn the remembered "linux,dma-default" region into the
 * global default coherent pool (dma_coherent_default_memory).
 *
 * Returns -ENOMEM when no default region was declared, otherwise the
 * result of the region's device_init hook.
 */
static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating the error of
	 * dma_assign_coherent_memory() for a NULL device, so the pool is
	 * created without being attached to any device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);

	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}

core_initcall(dma_init_reserved_memory);
|
|
|
|
|
2014-10-14 06:51:07 +08:00
|
|
|
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
|
|
|
|
#endif
|