dma-contiguous: add dma_{alloc,free}_contiguous() helpers
Both dma_alloc_from_contiguous() and dma_release_from_contiguous() are
very simply implemented, but they require callers to pass parameters
like count and align, and take a boolean parameter derived from checking
__GFP_NOWARN in the allocation flags. So every function call duplicates
similar work:

  unsigned long order = get_order(size);
  size_t count = size >> PAGE_SHIFT;

  page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);

  [...]

  dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);

Additionally, as CMA can be used only in a context which permits
sleeping, most callers do a gfpflags_allow_blocking() check, with a
fallback allocation of normal pages when that check returns false:

  if (gfpflags_allow_blocking(flag))
      page = dma_alloc_from_contiguous();
  if (!page)
      page = alloc_pages();

  [...]

  if (!dma_release_from_contiguous(dev, page, count))
      __free_pages(page, get_order(size));

So this patch simplifies those function calls by abstracting these
operations into two new functions: dma_{alloc,free}_contiguous().

As some callers of dma_{alloc,release}_from_contiguous() might be
complicated, this patch applies the two new functions to
kernel/dma/direct.c only, as an initial step.

Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
Tested-by: dann frazier <dann.frazier@canonical.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit b1d2dc009d
parent 1b96142315
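For context before the diff, a minimal, hypothetical caller sketch (the wrapper
names are made up and not part of this patch) showing how the open-coded
pattern above collapses into the two new helpers:

  #include <linux/device.h>
  #include <linux/dma-contiguous.h>
  #include <linux/gfp.h>

  /* Hypothetical wrappers, for illustration only. */
  static struct page *example_alloc_dma_buf(struct device *dev, size_t size,
                                             gfp_t gfp)
  {
          /*
           * Replaces the gfpflags_allow_blocking()/dma_alloc_from_contiguous()
           * check plus the alloc_pages() fallback: the helper tries the
           * device's CMA area itself (only when sleeping is allowed) and
           * falls back to normal pages on failure.
           */
          return dma_alloc_contiguous(dev, size, gfp);
  }

  static void example_free_dma_buf(struct device *dev, struct page *page,
                                   size_t size)
  {
          /*
           * Replaces the dma_release_from_contiguous()/__free_pages() pair:
           * pages that did not come from CMA are freed with __free_pages()
           * inside the helper.
           */
          dma_free_contiguous(dev, page, size);
  }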
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
@@ -111,6 +111,8 @@ struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                                        unsigned int order, bool no_warn);
 bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                  int count);
+struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
+void dma_free_contiguous(struct device *dev, struct page *page, size_t size);
 
 #else
 
@@ -153,6 +155,17 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
         return false;
 }
 
+static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
+                gfp_t gfp)
+{
+        return NULL;
+}
+
+static inline void dma_free_contiguous(struct device *dev, struct page *page,
+                size_t size)
+{
+}
+
 #endif
 
 #endif
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
@@ -214,6 +214,53 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
         return cma_release(dev_get_cma_area(dev), pages, count);
 }
 
+/**
+ * dma_alloc_contiguous() - allocate contiguous pages
+ * @dev: Pointer to device for which the allocation is performed.
+ * @size: Requested allocation size.
+ * @gfp: Allocation flags.
+ *
+ * This function allocates contiguous memory buffer for specified device. It
+ * first tries to use device specific contiguous memory area if available or
+ * the default global one, then tries a fallback allocation of normal pages.
+ */
+struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
+{
+        int node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
+        size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+        size_t align = get_order(PAGE_ALIGN(size));
+        struct cma *cma = dev_get_cma_area(dev);
+        struct page *page = NULL;
+
+        /* CMA can be used only in the context which permits sleeping */
+        if (cma && gfpflags_allow_blocking(gfp)) {
+                align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
+                page = cma_alloc(cma, count, align, gfp & __GFP_NOWARN);
+        }
+
+        /* Fallback allocation of normal pages */
+        if (!page)
+                page = alloc_pages_node(node, gfp, align);
+        return page;
+}
+
+/**
+ * dma_free_contiguous() - release allocated pages
+ * @dev: Pointer to device for which the pages were allocated.
+ * @page: Pointer to the allocated pages.
+ * @size: Size of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_contiguous(). As the
+ * cma_release returns false when provided pages do not belong to contiguous
+ * area and true otherwise, this function then does a fallback __free_pages()
+ * upon a false-return.
+ */
+void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
+{
+        if (!cma_release(dev_get_cma_area(dev), page, size >> PAGE_SHIFT))
+                __free_pages(page, get_order(size));
+}
+
 /*
  * Support for reserved memory regions defined in device tree
  */
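As a brief aside between the two files, a worked example of the count/align
arithmetic that dma_alloc_contiguous() now hides from callers (assuming 4 KiB
pages; the numbers are illustrative, not taken from the patch):

  /*
   * Assuming PAGE_SIZE == 4096 (PAGE_SHIFT == 12) and a 10 KiB request:
   *
   *   size                      = 10 * 1024 = 10240
   *   PAGE_ALIGN(size)          = 12288        (3 pages)
   *   count = 12288 >> 12       = 3            pages passed to cma_alloc()
   *   align = get_order(12288)  = 2            order-2 (4-page) alignment,
   *                                            also the order given to the
   *                                            alloc_pages_node() fallback
   */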
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
@@ -96,8 +96,6 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-        int page_order = get_order(size);
         struct page *page = NULL;
         u64 phys_mask;
 
@@ -109,20 +107,9 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
         gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                         &phys_mask);
 again:
-        /* CMA can be used only in the context which permits sleeping */
-        if (gfpflags_allow_blocking(gfp)) {
-                page = dma_alloc_from_contiguous(dev, count, page_order,
-                                                 gfp & __GFP_NOWARN);
-                if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-                        dma_release_from_contiguous(dev, page, count);
-                        page = NULL;
-                }
-        }
-        if (!page)
-                page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
-
+        page = dma_alloc_contiguous(dev, size, gfp);
         if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-                __free_pages(page, page_order);
+                dma_free_contiguous(dev, page, size);
                 page = NULL;
 
                 if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
@@ -154,7 +141,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
         if (PageHighMem(page)) {
                 /*
                  * Depending on the cma= arguments and per-arch setup
-                 * dma_alloc_from_contiguous could return highmem pages.
+                 * dma_alloc_contiguous could return highmem pages.
                  * Without remapping there is no way to return them here,
                  * so log an error and fail.
                  */
@@ -176,10 +163,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 
 void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page)
 {
-        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-        if (!dma_release_from_contiguous(dev, page, count))
-                __free_pages(page, get_order(size));
+        dma_free_contiguous(dev, page, size);
 }
 
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,