dma-mapping: merge the generic remapping helpers into dma-direct

Integrate the generic dma remapping implementation into the main flow.
This prepares for architectures like xtensa that use an uncached
segment for pages in the kernel mapping, but can also remap highmem
from CMA.  To simplify that implementation we now always derive the
page to free from the physical address, via the DMA address, instead
of from the virtual address.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
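
The free path can look up the page straight from the DMA handle because
the parent commit (34dc0ea6bc) introduced a dma_direct_to_page() helper
in kernel/dma/direct.c.  A sketch of its logic (the exact definition is
in that commit):

	/* sketch of the parent commit's helper, shown for context */
	struct page *dma_direct_to_page(struct device *dev, dma_addr_t dma_addr)
	{
		return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
	}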
commit 3acac06550
parent 34dc0ea6bc
Author: Christoph Hellwig <hch@lst.de>
Date:   2019-10-29 11:06:32 +01:00
2 changed files with 48 additions and 61 deletions

--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -12,6 +12,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/pfn.h>
+#include <linux/vmalloc.h>
 #include <linux/set_memory.h>
 #include <linux/swiotlb.h>
@@ -137,6 +138,15 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    dma_alloc_need_uncached(dev, attrs) &&
+	    !gfpflags_allow_blocking(gfp)) {
+		ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
+		if (!ret)
+			return NULL;
+		goto done;
+	}
+
 	page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
 	if (!page)
 		return NULL;
@@ -146,9 +156,28 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		/* remove any dirty cache lines on the kernel alias */
 		if (!PageHighMem(page))
 			arch_dma_prep_coherent(page, size);
+		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 		/* return the page pointer as the opaque cookie */
-		return page;
+		ret = page;
+		goto done;
+	}
+
+	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	     dma_alloc_need_uncached(dev, attrs)) ||
+	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+		/* remove any dirty cache lines on the kernel alias */
+		arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+
+		/* create a coherent mapping */
+		ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+				dma_pgprot(dev, PAGE_KERNEL, attrs),
+				__builtin_return_address(0));
+		if (!ret) {
+			dma_free_contiguous(dev, page, size);
+			return ret;
+		}
+
+		memset(ret, 0, size);
+		goto done;
 	}
 
 	if (PageHighMem(page)) {
@@ -164,12 +193,9 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev)) {
+	if (force_dma_unencrypted(dev))
 		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
-		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
-	} else {
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-	}
+
 	memset(ret, 0, size);
 
 	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
@@ -177,7 +203,11 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		arch_dma_prep_coherent(page, size);
 		ret = uncached_kernel_address(ret);
 	}
-
+done:
+	if (force_dma_unencrypted(dev))
+		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
+	else
+		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
 }
@@ -193,19 +223,24 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
+		return;
+
 	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 
-	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
-	    dma_alloc_need_uncached(dev, attrs))
-		cpu_addr = cached_kernel_address(cpu_addr);
-	dma_free_contiguous(dev, virt_to_page(cpu_addr), size);
+	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
+		vunmap(cpu_addr);
+
+	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_alloc_need_uncached(dev, attrs))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
@@ -215,6 +250,7 @@ void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
 	    dma_alloc_need_uncached(dev, attrs))
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 	else

--- a/kernel/dma/remap.c
+++ b/kernel/dma/remap.c

@@ -210,53 +210,4 @@ bool dma_free_from_pool(void *start, size_t size)
 	gen_pool_free(atomic_pool, (unsigned long)start, size);
 	return true;
 }
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t flags, unsigned long attrs)
-{
-	struct page *page = NULL;
-	void *ret;
-
-	size = PAGE_ALIGN(size);
-
-	if (!gfpflags_allow_blocking(flags)) {
-		ret = dma_alloc_from_pool(size, &page, flags);
-		if (!ret)
-			return NULL;
-		goto done;
-	}
-
-	page = __dma_direct_alloc_pages(dev, size, flags, attrs);
-	if (!page)
-		return NULL;
-
-	/* remove any dirty cache lines on the kernel alias */
-	arch_dma_prep_coherent(page, size);
-
-	/* create a coherent mapping */
-	ret = dma_common_contiguous_remap(page, size,
-			dma_pgprot(dev, PAGE_KERNEL, attrs),
-			__builtin_return_address(0));
-	if (!ret) {
-		dma_free_contiguous(dev, page, size);
-		return ret;
-	}
-
-	memset(ret, 0, size);
-done:
-	*dma_handle = phys_to_dma(dev, page_to_phys(page));
-	return ret;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
-{
-	if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
-		phys_addr_t phys = dma_to_phys(dev, dma_handle);
-		struct page *page = pfn_to_page(__phys_to_pfn(phys));
-
-		vunmap(vaddr);
-		dma_free_contiguous(dev, page, size);
-	}
-}
 #endif /* CONFIG_DMA_DIRECT_REMAP */
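
For illustration only, not part of this commit: callers are unaffected,
since both the blocking and the non-blocking paths sit behind the regular
coherent DMA API.  A minimal, hypothetical driver snippet
(my_driver_alloc_buffer is a made-up name):

	#include <linux/dma-mapping.h>

	static int my_driver_alloc_buffer(struct device *dev, size_t size,
			void **cpu_addr, dma_addr_t *dma_handle)
	{
		/*
		 * GFP_KERNEL may block, so dma_direct_alloc_pages() takes the
		 * CMA/page-allocator path and, with CONFIG_DMA_DIRECT_REMAP,
		 * remaps the buffer when an uncached mapping is required.
		 * A GFP_ATOMIC caller would instead hit the new
		 * dma_alloc_from_pool() fast path at the top of the function.
		 */
		*cpu_addr = dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
		return *cpu_addr ? 0 : -ENOMEM;
	}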