mirror of https://gitee.com/openkylin/linux.git
dma-mapping: consolidate dma_{alloc,free}_{attrs,coherent}
Since 2009 we have a nice asm-generic header implementing lots of DMA API functions for architectures using struct dma_map_ops, but unfortunately it's still missing a lot of APIs that all architectures still have to duplicate.

This series consolidates the remaining functions, although we still need arch opt outs for two of them as a few architectures have very non-standard implementations.

This patch (of 5):

The coherent DMA allocator works the same over all architectures supporting dma_map operations.

This patch consolidates them and converges the minor differences:

- the debug_dma helpers are now called from all architectures, including those that were previously missing them
- dma_alloc_from_coherent and dma_release_from_coherent are now always called from the generic alloc/free routines instead of the ops; dma-mapping-common.h always includes dma-coherent.h to get the definitions for them, or the stubs if the architecture doesn't support this feature
- checks for ->alloc / ->free presence are removed; there is only one instance of dma_map_ops without them (mic_dma_ops), and that one is x86 only anyway

Besides that, only x86 needs special treatment to substitute a default device if none is passed and to tweak the gfp_flags. An optional arch hook is provided for that.

[linux@roeck-us.net: fix build]
[jcmvbkbc@gmail.com: fix xtensa]

Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent fb6dd5fa41
commit 6894258eda
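The key outcome of the consolidation is a single generic allocation path in dma-mapping-common.h. The sketch below is a condensed reading of that path, taken from the final hunk of this diff with explanatory comments added; it is not a verbatim copy. The free path is symmetric, and arch_dma_alloc_attrs() is the optional arch hook that only x86 currently overrides.

/* Condensed sketch of the consolidated generic allocator added by this patch. */
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)  /* default: no arch-specific fixup */
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t flag,
                                    struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        BUG_ON(!ops);

        /* Per-device coherent pools are now handled here, not per arch. */
        if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        /* x86 substitutes a fallback device and tweaks the gfp flags here. */
        if (!arch_dma_alloc_attrs(&dev, &flag))
                return NULL;
        if (!ops->alloc)
                return NULL;

        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}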
@@ -12,24 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#include <asm-generic/dma-mapping-common.h>
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs);
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return get_dma_ops(dev)->mapping_error(dev, dma_addr);

@@ -8,7 +8,6 @@
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
#include <xen/xen.h>

@@ -209,21 +208,6 @@ extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, struct dma_attrs *attrs);
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!ops);
cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
/**
* arm_dma_free - free memory allocated by arm_dma_alloc
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices

@@ -241,19 +225,6 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs);
#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
/**
* arm_dma_mmap - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices

@@ -676,10 +676,6 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, struct dma_attrs *attrs)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
return __dma_alloc(dev, size, handle, gfp, prot, false,
attrs, __builtin_return_address(0));

@@ -688,11 +684,6 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
attrs, __builtin_return_address(0));
}

@@ -752,9 +743,6 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;
size = PAGE_ALIGN(size);
if (nommu()) {

@@ -22,8 +22,6 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <asm-generic/dma-coherent.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

@@ -120,37 +118,6 @@ static inline void dma_mark_clean(void *addr, size_t size)
{
}
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *vaddr;
if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
return vaddr;
vaddr = ops->alloc(dev, size, dma_handle, flags, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);
return vaddr;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t dev_addr,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
if (dma_release_from_coherent(dev, get_order(size), vaddr))
return;
debug_dma_free_coherent(dev, size, vaddr, dev_addr);
ops->free(dev, size, vaddr, dev_addr, attrs);
}
/*
* There is no dma_cache_sync() implementation, so just return NULL here.
*/

@@ -1,8 +1,6 @@
#ifndef _H8300_DMA_MAPPING_H
#define _H8300_DMA_MAPPING_H
#include <asm-generic/dma-coherent.h>
extern struct dma_map_ops h8300_dma_map_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)

@@ -25,30 +23,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
memory = ops->alloc(dev, size, dma_handle, flag, attrs);
return memory;
}
#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return 0;

@@ -70,37 +70,4 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return (dma_addr == bad_dma_address);
}
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
ret = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
}
#endif

@@ -23,31 +23,6 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
enum dma_data_direction);
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *daddr, gfp_t gfp,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = platform_dma_get_ops(dev);
void *caddr;
caddr = ops->alloc(dev, size, daddr, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *daddr, caddr);
return caddr;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *caddr, dma_addr_t daddr,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = platform_dma_get_ops(dev);
debug_dma_free_coherent(dev, size, caddr, daddr);
ops->free(dev, size, caddr, daddr, attrs);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

@@ -27,7 +27,6 @@
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm-generic/dma-coherent.h>
#include <asm/cacheflush.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)

@@ -102,36 +101,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
BUG_ON(!ops);
memory = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d, s, c, h, NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction)
{

@@ -161,9 +161,6 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
{
void *ret;
if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
return ret;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

@@ -194,11 +191,6 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
static void octeon_dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
{
int order = get_order(size);
if (dma_release_from_coherent(dev, order, vaddr))
return;
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

@@ -4,7 +4,6 @@
#include <linux/scatterlist.h>
#include <asm/dma-coherence.h>
#include <asm/cache.h>
#include <asm-generic/dma-coherent.h>
#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
#include <dma-coherence.h>

@@ -65,36 +64,6 @@ dma_set_mask(struct device *dev, u64 mask)
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction direction);
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
ops->free(dev, size, vaddr, dma_handle, attrs);
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
}
void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag);

@@ -14,9 +14,6 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
{
void *ret;
if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
return ret;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

@@ -46,11 +43,6 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
static void loongson_dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
{
int order = get_order(size);
if (dma_release_from_coherent(dev, order, vaddr))
return;
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

@@ -137,9 +137,6 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
struct page *page = NULL;
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
return ret;
gfp = massage_gfp_flags(dev, gfp);
if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))

@@ -176,13 +173,9 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
unsigned long addr = (unsigned long) vaddr;
int order = get_order(size);
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = NULL;
if (dma_release_from_coherent(dev, order, vaddr))
return;
plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
if (!plat_device_is_coherent(dev) && !hw_coherentio)

@@ -47,11 +47,6 @@ static char *nlm_swiotlb;
static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
void *ret;
if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
return ret;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

@@ -69,11 +64,6 @@ static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
static void nlm_dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
{
int order = get_order(size);
if (dma_release_from_coherent(dev, order, vaddr))
return;
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

@@ -23,7 +23,6 @@
*/
#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <linux/kmemcheck.h>
#include <linux/dma-mapping.h>

@@ -38,35 +37,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#include <asm-generic/dma-mapping-common.h>
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
memory = ops->alloc(dev, size, dma_handle, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{

@@ -137,39 +137,6 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern int __dma_set_mask(struct device *dev, u64 dma_mask);
extern u64 __dma_get_required_mask(struct device *dev);
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!dma_ops);
cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
BUG_ON(!dma_ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);

@@ -56,35 +56,4 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return dma_addr == DMA_ERROR_CODE;
}
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!ops);
cpu_addr = ops->alloc(dev, size, dma_handle, flags, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
#endif /* _ASM_S390_DMA_MAPPING_H */

@@ -9,7 +9,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return dma_ops;
}
#include <asm-generic/dma-coherent.h>
#include <asm-generic/dma-mapping-common.h>
static inline int dma_supported(struct device *dev, u64 mask)

@@ -53,42 +52,6 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
return dma_addr == 0;
}
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
return memory;
if (!ops->alloc)
return NULL;
memory = ops->alloc(dev, size, dma_handle, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
if (dma_release_from_coherent(dev, get_order(size), vaddr))
return;
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
if (ops->free)
ops->free(dev, size, vaddr, dma_handle, attrs);
}
/* arch/sh/mm/consistent.c */
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,

@@ -41,32 +41,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#include <asm-generic/dma-mapping-common.h>
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
debug_dma_mapping_error(dev, dma_addr);

@@ -116,34 +116,7 @@ dma_set_mask(struct device *dev, u64 mask)
return 0;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
void *cpu_addr;
cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
/*

@@ -18,8 +18,6 @@
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
#include <asm/cacheflush.h>

@@ -82,28 +80,6 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
return 0;
}
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
return dma_ops->alloc(dev, size, dma_handle, flag, attrs);
}
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

@@ -12,7 +12,6 @@
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
#include <linux/dma-contiguous.h>
#ifdef CONFIG_ISA

@@ -41,6 +40,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#endif
}
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
#define arch_dma_alloc_attrs arch_dma_alloc_attrs
#include <asm-generic/dma-mapping-common.h>
/* Make sure we keep the same behaviour */

@@ -125,16 +127,4 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
return gfp;
}
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
void *
dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, struct dma_attrs *attrs);
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t bus,
struct dma_attrs *attrs);
#endif

@@ -140,50 +140,19 @@ void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
free_pages((unsigned long)vaddr, get_order(size));
}
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, struct dma_attrs *attrs)
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *memory;
*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
if (!*dev)
*dev = &x86_dma_fallback_dev;
if (!is_device_dma_capable(*dev))
return false;
return true;
if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
return memory;
if (!dev)
dev = &x86_dma_fallback_dev;
if (!is_device_dma_capable(dev))
return NULL;
if (!ops->alloc)
return NULL;
memory = ops->alloc(dev, size, dma_handle,
dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
return memory;
}
EXPORT_SYMBOL(dma_alloc_attrs);
void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t bus,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
WARN_ON(irqs_disabled()); /* for portability */
if (dma_release_from_coherent(dev, get_order(size), vaddr))
return;
debug_dma_free_coherent(dev, size, vaddr, bus);
if (ops->free)
ops->free(dev, size, vaddr, bus, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
EXPORT_SYMBOL(arch_dma_alloc_attrs);
/*
* See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel

@@ -34,37 +34,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
struct dma_attrs *attrs)
{
void *ret;
struct dma_map_ops *ops = get_dma_ops(dev);
if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
return ret;
ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
return ret;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
if (dma_release_from_coherent(dev, get_order(size), vaddr))
return;
ops->free(dev, size, vaddr, dma_handle, attrs);
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
}
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)

@@ -311,9 +311,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
*/
flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
return ret;
/* On ARM this function returns an ioremap'ped virtual address for
* which virt_to_phys doesn't return the corresponding physical
* address. In fact on ARM virt_to_phys only works for kernel direct

@@ -356,9 +353,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
phys_addr_t phys;
u64 dma_mask = DMA_BIT_MASK(32);
if (dma_release_from_coherent(hwdev, order, vaddr))
return;
if (hwdev && hwdev->coherent_dma_mask)
dma_mask = hwdev->coherent_dma_mask;

@@ -6,6 +6,7 @@
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm-generic/dma-coherent.h>
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,

@@ -237,4 +238,61 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
#endif
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
BUG_ON(!ops);
if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
return cpu_addr;
if (!arch_dma_alloc_attrs(&dev, &flag))
return NULL;
if (!ops->alloc)
return NULL;
cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
return cpu_addr;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
WARN_ON(irqs_disabled());
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
return;
if (!ops->free)
return;
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}
#endif