dma-mapping updates for 5.7

- fix an integer overflow in the coherent pool (Kevin Grandemange)
- provide support for in-place uncached remapping and use that for
  openrisc
- fix the arm coherent allocator to take the bus limit into account
-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAl6IL38LHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYN8Pg/9FerEL9GoU7lSeXvvklAED5Ro3JVyaZdA4eQaYzG6
PnUYs+lFAsKGoE8qWbvue+pJs1JZtZ+oSp9uVrLGbPaMS+71iy/hCS+Cv9Ym0Y0u
lciBlFEkN9qM1srBMn4vdBo4gIyailcylznQxzw/ZjU90vU4uUJj37YrfGxQnOd8
pHrD24wutwECksEp6nLx3/Yt2xW92j9/oH+FnEK5mfaA0ATAQMz51L9veyU6liV4
1A8jbi0diskAIqn4uyO5SuVg7s7C90HOe3JUBtk+oZvUXFlr9WvrDjCqBp6rcSiH
XS+Z2RBomMSrjEHOfETcFu8JSyjY1eKu/a1rvEbmUc6bE/gKrEGgPiQwSLa+1Aty
qy3I24uSF7xcs5yngnjzIQ/BizKFk/wzja15c4sfUNKiXLI6FqwwHL34Dg+Nv7UG
A/eCXePzOGPVANcIU0Zh68epEfCJRqJtqy2BDrWisqRfhxd3rRgl9gNeS1JwR0El
9T5c+CKfXn1IVA3YhMABUYh1JJ9bXrlZIOd3PEPwvwYRBnIYxP6JK2R+4BYjsMHy
Y90QyAUUsJKMWYq4p4EpSCUGSlnzl9I2QH3ItUHvo+T9NcT6Vo4J6tCTQZu5tUGM
SPiV49Gxz3u2+5VBmolWixO6JpRBv+92gowWdxRULkFpMaOw8mPInWW5cWPWc2MY
u/Y=
=DrK0
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.7' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - fix an integer overflow in the coherent pool (Kevin Grandemange)

 - provide support for in-place uncached remapping and use that for
   openrisc

 - fix the arm coherent allocator to take the bus limit into account

* tag 'dma-mapping-5.7' of git://git.infradead.org/users/hch/dma-mapping:
  ARM/dma-mapping: merge __dma_supported into arm_dma_supported
  ARM/dma-mapping: take the bus limit into account in __dma_alloc
  ARM/dma-mapping: remove get_coherent_dma_mask
  openrisc: use the generic in-place uncached DMA allocator
  dma-direct: provide a arch_dma_clear_uncached hook
  dma-direct: make uncached_kernel_address more general
  dma-direct: consolidate the error handling in dma_direct_alloc_pages
  dma-direct: remove the cached_kernel_address hook
  dma-coherent: fix integer overflow in the reserved-memory dma allocation
commit 6f43bae382
 arch/Kconfig | 15 +++++++++++----
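Of the three fixes, the bus-limit one is the easiest to see in miniature: __dma_alloc() below now derives its allocation mask as min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit) instead of calling get_coherent_dma_mask(). A stand-alone userspace sketch of that selection logic, with invented values (the real min_not_zero() macro lives in include/linux/kernel.h):

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors the effect of the kernel's min_not_zero(): take the
     * smaller of two limits, treating 0 as "no limit set" */
    static uint64_t min_not_zero(uint64_t x, uint64_t y)
    {
        if (x == 0)
            return y;
        if (y == 0)
            return x;
        return x < y ? x : y;
    }

    int main(void)
    {
        uint64_t coherent_dma_mask = 0xffffffff; /* 32-bit capable device */
        uint64_t bus_dma_limit = 0x3fffffff;     /* bridge decodes only 1 GiB */

        printf("mask = %#llx\n", (unsigned long long)
               min_not_zero(coherent_dma_mask, bus_dma_limit));
        return 0;
    }

Whichever constraint is tighter wins; a bus_dma_limit of zero means the bus imposes none.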
diff --git a/arch/Kconfig b/arch/Kconfig
@@ -248,11 +248,18 @@ config ARCH_HAS_SET_DIRECT_MAP
 	bool
 
 #
-# Select if arch has an uncached kernel segment and provides the
-# uncached_kernel_address / cached_kernel_address symbols to use it
+# Select if the architecture provides the arch_dma_set_uncached symbol to
+# either provide an uncached segement alias for a DMA allocation, or
+# to remap the page tables in place.
 #
-config ARCH_HAS_UNCACHED_SEGMENT
-	select ARCH_HAS_DMA_PREP_COHERENT
+config ARCH_HAS_DMA_SET_UNCACHED
+	bool
+
+#
+# Select if the architectures provides the arch_dma_clear_uncached symbol
+# to undo an in-place page table remap for uncached access.
+#
+config ARCH_HAS_DMA_CLEAR_UNCACHED
 	bool
 
 # Select if arch init_task must go in the __init_task_data section
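For orientation: an architecture with a permanent uncached alias window can satisfy the new ARCH_HAS_DMA_SET_UNCACHED contract with a one-line hook. This is a hypothetical sketch (invented arch and base constant, modeled on the MIPS and nios2 conversions further down); ARCH_HAS_DMA_CLEAR_UNCACHED is only needed when the hook remaps the page tables in place, as openrisc does later in this diff:

    /* hypothetical arch/foo/mm/dma.c -- not part of this series */
    #define FOO_UNCACHED_BASE    0xa0000000UL  /* invented alias window base */

    void *arch_dma_set_uncached(void *addr, size_t size)
    {
        /* permanent alias segment: nothing to undo at free time,
         * so no arch_dma_clear_uncached() is required */
        return (void *)(__pa(addr) + FOO_UNCACHED_BASE);
    }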
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
@@ -33,7 +33,5 @@ int arm_iommu_attach_device(struct device *dev,
 					struct dma_iommu_mapping *mapping);
 void arm_iommu_detach_device(struct device *dev);
 
-int arm_dma_supported(struct device *dev, u64 mask);
-
 #endif /* __KERNEL__ */
 #endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
@@ -179,6 +179,23 @@ static void arm_dma_sync_single_for_device(struct device *dev,
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly. For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask
+ * to this function.
+ */
+static int arm_dma_supported(struct device *dev, u64 mask)
+{
+	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
+
+	/*
+	 * Translate the device's DMA mask to a PFN limit. This
+	 * PFN number includes the page which we can DMA to.
+	 */
+	return dma_to_pfn(dev, mask) >= max_dma_pfn;
+}
+
 const struct dma_map_ops arm_dma_ops = {
 	.alloc			= arm_dma_alloc,
 	.free			= arm_dma_free,
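Driver side, nothing changes: a mask still arrives through the dma_set_mask() family and ends up in the check above. A hedged sketch with an invented driver (dma_set_mask_and_coherent() and DMA_BIT_MASK() are the real APIs):

    #include <linux/dma-mapping.h>
    #include <linux/platform_device.h>

    /* hypothetical probe function for a device that can only drive
     * 24 address lines while bus mastering */
    static int foo_probe(struct platform_device *pdev)
    {
        int ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(24));

        if (ret)  /* arm_dma_supported() said no: RAM extends past 16 MiB */
            return ret;
        return 0;
    }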
@@ -219,49 +236,6 @@ const struct dma_map_ops arm_coherent_dma_ops = {
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
-static int __dma_supported(struct device *dev, u64 mask, bool warn)
-{
-	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
-
-	/*
-	 * Translate the device's DMA mask to a PFN limit. This
-	 * PFN number includes the page which we can DMA to.
-	 */
-	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
-		if (warn)
-			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
-				 mask,
-				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
-				 max_dma_pfn + 1);
-		return 0;
-	}
-
-	return 1;
-}
-
-static u64 get_coherent_dma_mask(struct device *dev)
-{
-	u64 mask = (u64)DMA_BIT_MASK(32);
-
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			return 0;
-		}
-
-		if (!__dma_supported(dev, mask, true))
-			return 0;
-	}
-
-	return mask;
-}
-
 static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
 {
 	/*
@@ -688,7 +662,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 			 gfp_t gfp, pgprot_t prot, bool is_coherent,
 			 unsigned long attrs, const void *caller)
 {
-	u64 mask = get_coherent_dma_mask(dev);
+	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 	struct page *page = NULL;
 	void *addr;
 	bool allowblock, cma;
@@ -712,9 +686,6 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 	}
 #endif
 
-	if (!mask)
-		return NULL;
-
 	buf = kzalloc(sizeof(*buf),
 		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
 	if (!buf)
@@ -1087,17 +1058,6 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 			dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly. For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- */
-int arm_dma_supported(struct device *dev, u64 mask)
-{
-	return __dma_supported(dev, mask, false);
-}
-
 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 {
 	/*
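The exported wrapper can go because nothing outside this file called it directly: both ARM dma_map_ops reach the check through their .dma_supported method, which now points at the static version moved above them (wiring not visible in these hunks, quoted here from memory of the surrounding file):

    	/* unchanged context in arm_dma_ops / arm_coherent_dma_ops */
    	.dma_supported		= arm_dma_supported,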
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
@@ -8,7 +8,7 @@ config MICROBLAZE
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select ARCH_HAS_UNCACHED_SEGMENT if !MMU
+	select ARCH_HAS_DMA_SET_UNCACHED if !MMU
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_TABLE_SORT
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
@@ -40,7 +40,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 #define UNCACHED_SHADOW_MASK 0
 #endif /* CONFIG_XILINX_UNCACHED_SHADOW */
 
-void *uncached_kernel_address(void *ptr)
+void *arch_dma_set_uncached(void *ptr, size_t size)
 {
 	unsigned long addr = (unsigned long)ptr;
 
@@ -49,11 +49,4 @@ void *uncached_kernel_address(void *ptr)
 		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");
 	return (void *)addr;
 }
-
-void *cached_kernel_address(void *ptr)
-{
-	unsigned long addr = (unsigned long)ptr;
-
-	return (void *)(addr & ~UNCACHED_SHADOW_MASK);
-}
 #endif /* CONFIG_MMU */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
@@ -1192,8 +1192,9 @@ config DMA_NONCOHERENT
 	# significant advantages.
 	#
 	select ARCH_HAS_DMA_WRITE_COMBINE
+	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select ARCH_HAS_UNCACHED_SEGMENT
+	select ARCH_HAS_DMA_SET_UNCACHED
 	select DMA_NONCOHERENT_MMAP
 	select DMA_NONCOHERENT_CACHE_SYNC
 	select NEED_DMA_MAP_STATE
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
@@ -49,16 +49,11 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	dma_cache_wback_inv((unsigned long)page_address(page), size);
 }
 
-void *uncached_kernel_address(void *addr)
+void *arch_dma_set_uncached(void *addr, size_t size)
 {
 	return (void *)(__pa(addr) + UNCAC_BASE);
 }
 
-void *cached_kernel_address(void *addr)
-{
-	return __va(addr) - UNCAC_BASE;
-}
-
 static inline void dma_sync_virt(void *addr, size_t size,
 		enum dma_data_direction dir)
 {
diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
@@ -2,9 +2,10 @@
 config NIOS2
 	def_bool y
 	select ARCH_32BIT_OFF_T
+	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select ARCH_HAS_UNCACHED_SEGMENT
+	select ARCH_HAS_DMA_SET_UNCACHED
 	select ARCH_NO_SWAP
 	select TIMER_OF
 	select GENERIC_ATOMIC64
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
@@ -67,7 +67,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 	flush_dcache_range(start, start + size);
 }
 
-void *uncached_kernel_address(void *ptr)
+void *arch_dma_set_uncached(void *ptr, size_t size)
 {
 	unsigned long addr = (unsigned long)ptr;
 
@@ -75,13 +75,3 @@ void *arch_dma_set_uncached(void *ptr, size_t size)
 
 	return (void *)ptr;
 }
-
-void *cached_kernel_address(void *ptr)
-{
-	unsigned long addr = (unsigned long)ptr;
-
-	addr &= ~CONFIG_NIOS2_IO_REGION_BASE;
-	addr |= CONFIG_NIOS2_KERNEL_REGION_BASE;
-
-	return (void *)ptr;
-}
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
@@ -7,6 +7,8 @@
 config OPENRISC
 	def_bool y
 	select ARCH_32BIT_OFF_T
+	select ARCH_HAS_DMA_SET_UNCACHED
+	select ARCH_HAS_DMA_CLEAR_UNCACHED
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select OF
 	select OF_EARLY_FLATTREE
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
@@ -11,8 +11,6 @@
  * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
  *
  * DMA mapping callbacks...
- * As alloc_coherent is the only DMA callback being used currently, that's
- * the only thing implemented properly. The rest need looking into...
  */
 
 #include <linux/dma-noncoherent.h>
@@ -67,62 +65,29 @@ static const struct mm_walk_ops clear_nocache_walk_ops = {
 	.pte_entry = page_clear_nocache,
 };
 
-/*
- * Alloc "coherent" memory, which for OpenRISC means simply uncached.
- *
- * This function effectively just calls __get_free_pages, sets the
- * cache-inhibit bit on those pages, and makes sure that the pages are
- * flushed out of the cache before they are used.
- *
- * If the NON_CONSISTENT attribute is set, then this function just
- * returns "normal", cachable memory.
- *
- * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
- * into consideration here, too. All current known implementations of
- * the OR1K support only strongly ordered memory accesses, so that flag
- * is being ignored for now; uncached but write-combined memory is a
- * missing feature of the OR1K.
- */
-void *
-arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp, unsigned long attrs)
+void *arch_dma_set_uncached(void *cpu_addr, size_t size)
 {
-	unsigned long va;
-	void *page;
-
-	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
-	if (!page)
-		return NULL;
-
-	/* This gives us the real physical address of the first page. */
-	*dma_handle = __pa(page);
-
-	va = (unsigned long)page;
+	unsigned long va = (unsigned long)cpu_addr;
+	int error;
 
 	/*
 	 * We need to iterate through the pages, clearing the dcache for
 	 * them and setting the cache-inhibit bit.
 	 */
-	if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
-			NULL)) {
-		free_pages_exact(page, size);
-		return NULL;
-	}
-
-	return (void *)va;
+	error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
+			NULL);
+	if (error)
+		return ERR_PTR(error);
+	return cpu_addr;
 }
 
-void
-arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
+void arch_dma_clear_uncached(void *cpu_addr, size_t size)
 {
-	unsigned long va = (unsigned long)vaddr;
+	unsigned long va = (unsigned long)cpu_addr;
 
 	/* walk_page_range shouldn't be able to fail here */
 	WARN_ON(walk_page_range(&init_mm, va, va + size,
 				&clear_nocache_walk_ops, NULL));
-
-	free_pages_exact(vaddr, size);
 }
 
 void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
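The rewritten hook reports failure with the kernel's ERR_PTR() convention rather than a bare NULL, so dma_direct_alloc_pages() (later in this diff) can tell "remap failed" apart from a valid address and unwind. A userspace re-implementation of that convention, mirroring include/linux/err.h, just to show the encoding:

    #include <stdio.h>

    #define MAX_ERRNO    4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
        void *p = ERR_PTR(-12);  /* -ENOMEM */

        /* the top 4095 pointer values encode negative errnos */
        if (IS_ERR(p))
            printf("walk_page_range failed: %ld\n", PTR_ERR(p));
        return 0;
    }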
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
@@ -6,7 +6,7 @@ config XTENSA
 	select ARCH_HAS_DMA_PREP_COHERENT if MMU
 	select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
-	select ARCH_HAS_UNCACHED_SEGMENT if MMU
+	select ARCH_HAS_DMA_SET_UNCACHED if MMU
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
 	select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c
@@ -88,18 +88,12 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 
 /*
  * Memory caching is platform-dependent in noMMU xtensa configurations.
- * The following two functions should be implemented in platform code
- * in order to enable coherent DMA memory operations when CONFIG_MMU is not
- * enabled.
+ * This function should be implemented in platform code in order to enable
+ * coherent DMA memory operations when CONFIG_MMU is not enabled.
  */
 #ifdef CONFIG_MMU
-void *uncached_kernel_address(void *p)
+void *arch_dma_set_uncached(void *p, size_t size)
 {
 	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
 }
-
-void *cached_kernel_address(void *p)
-{
-	return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-}
 #endif /* CONFIG_MMU */
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
@@ -108,7 +108,7 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
 
-void *uncached_kernel_address(void *addr);
-void *cached_kernel_address(void *addr);
+void *arch_dma_set_uncached(void *addr, size_t size);
+void arch_dma_clear_uncached(void *addr, size_t size);
 
 #endif /* _LINUX_DMA_NONCOHERENT_H */
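Paraphrasing the contract of the two prototypes above, drawn from the Kconfig help text and the callers in this series (this is a summary, not kernel documentation):

    /*
     * arch_dma_set_uncached() - given a cached kernel address covering
     *	size bytes, return an address with caching disabled: either a
     *	permanent uncached segment alias, or an in-place page-table
     *	remap; may return ERR_PTR() on failure.
     * arch_dma_clear_uncached() - undo an in-place remap at free time;
     *	only implemented (and only called) when the architecture
     *	selects ARCH_HAS_DMA_CLEAR_UNCACHED.
     */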
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
@@ -134,7 +134,7 @@ static void *__dma_alloc_from_coherent(struct device *dev,
 
 	spin_lock_irqsave(&mem->spinlock, flags);
 
-	if (unlikely(size > (mem->size << PAGE_SHIFT)))
+	if (unlikely(size > ((dma_addr_t)mem->size << PAGE_SHIFT)))
 		goto err;
 
 	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
@@ -144,8 +144,9 @@ static void *__dma_alloc_from_coherent(struct device *dev,
 	/*
 	 * Memory was found in the coherent area.
 	 */
-	*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
-	ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	*dma_handle = dma_get_device_base(dev, mem) +
+			((dma_addr_t)pageno << PAGE_SHIFT);
+	ret = mem->virt_base + ((dma_addr_t)pageno << PAGE_SHIFT);
 	spin_unlock_irqrestore(&mem->spinlock, flags);
 	memset(ret, 0, size);
 	return ret;
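This is Kevin Grandemange's overflow fix. The casts matter because mem->size is a page count held in an int, so the shifts were done in 32-bit arithmetic; a reserved pool of 4 GiB or more wrapped to zero. A userspace demonstration of the arithmetic (invented values; the kernel's dma_addr_t is 64-bit on configs with CONFIG_ARCH_DMA_ADDR_T_64BIT):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int size = 0x100000;   /* a 4 GiB pool counted in 4 KiB pages */
        int page_shift = 12;

        uint32_t before = (uint32_t)size << page_shift;  /* wraps to 0 */
        uint64_t after = (uint64_t)size << page_shift;   /* 0x100000000 */

        printf("before=%#x after=%#llx\n",
               before, (unsigned long long)after);
        return 0;
    }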
@@ -194,7 +195,7 @@ static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
 				       int order, void *vaddr)
 {
 	if (mem && vaddr >= mem->virt_base && vaddr <
-	    (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+	    (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
 		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
 		unsigned long flags;
 
@@ -238,10 +239,10 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
 		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
 {
 	if (mem && vaddr >= mem->virt_base && vaddr + size <=
-	    (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+	    (mem->virt_base + ((dma_addr_t)mem->size << PAGE_SHIFT))) {
 		unsigned long off = vma->vm_pgoff;
 		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-		int user_count = vma_pages(vma);
+		unsigned long user_count = vma_pages(vma);
 		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
 		*ret = -ENXIO;
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
@@ -157,11 +157,8 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
 				dma_pgprot(dev, PAGE_KERNEL, attrs),
 				__builtin_return_address(0));
-		if (!ret) {
-			dma_free_contiguous(dev, page, size);
-			return ret;
-		}
-
+		if (!ret)
+			goto out_free_pages;
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -174,8 +171,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		 * so log an error and fail.
 		 */
 		dev_info(dev, "Rejecting highmem page from CMA.\n");
-		dma_free_contiguous(dev, page, size);
-		return NULL;
+		goto out_free_pages;
 	}
 
 	ret = page_address(page);
@@ -184,10 +180,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 
 	memset(ret, 0, size);
 
-	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    dma_alloc_need_uncached(dev, attrs)) {
 		arch_dma_prep_coherent(page, size);
-		ret = uncached_kernel_address(ret);
+		ret = arch_dma_set_uncached(ret, size);
+		if (IS_ERR(ret))
+			goto out_free_pages;
 	}
 done:
 	if (force_dma_unencrypted(dev))
@@ -195,6 +193,9 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	else
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
+out_free_pages:
+	dma_free_contiguous(dev, page, size);
+	return NULL;
 }
 
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
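All three failure paths in dma_direct_alloc_pages() now funnel through the single out_free_pages label added above, which is the consolidation the commit message refers to. The shape of the idiom, reduced to a self-contained sketch with an invented helper:

    #include <stdlib.h>

    int prepare(void *buf, size_t size);  /* assumed helper, may fail */

    void *alloc_and_prepare(size_t size)
    {
        void *buf = malloc(size);

        if (!buf)
            return NULL;
        if (prepare(buf, size))
            goto out_free;  /* every post-allocation failure unwinds here */
        return buf;
    out_free:
        free(buf);
        return NULL;
    }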
@@ -218,6 +219,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
 		vunmap(cpu_addr);
+	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
+		arch_dma_clear_uncached(cpu_addr, size);
 
 	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
 }
@@ -225,7 +228,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_alloc_need_uncached(dev, attrs))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
@@ -235,7 +238,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_alloc_need_uncached(dev, attrs))
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);