xen/arm: use hypercall to flush caches in map_page

In xen_dma_map_page, if the page is a local page, call the native
map_page dma_ops. If the page is foreign, call __xen_dma_map_page that
issues any required cache maintenance operations via hypercall.

The reason for doing this is that the native dma_ops map_page could
allocate buffers that need to be freed. If the page is foreign, we don't
call the native unmap_page dma_ops function, resulting in a memory leak.

Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Author:    Stefano Stabellini, 2014-11-21 11:05:39 +00:00
Committer: David Vrabel
Commit:    3567258d28
Parent:    a0f2dee0cd
2 changed files with 24 additions and 1 deletion
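
The dispatch described in the commit message relies on dom0 being mapped 1:1, so a page is local exactly when its pfn equals the machine frame behind the device address. As a point of reference, here is a minimal sketch of that test; it is illustrative only and not part of the commit (the hunk below open-codes the same comparison inline):

/* Illustrative helper, not in the kernel: under dom0's 1:1 mapping a local
 * page has pfn == mfn, so the machine frame behind dev_addr matches the
 * page's own pfn; a grant-mapped foreign page breaks that equality. */
static inline bool xen_page_is_local(struct page *page, dma_addr_t dev_addr)
{
	return PFN_DOWN(dev_addr) == page_to_pfn(page);
}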


@@ -5,6 +5,9 @@
 #include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, struct dma_attrs *attrs);
 void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs);
@@ -32,7 +35,15 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	     dma_addr_t dev_addr, unsigned long offset, size_t size,
 	     enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+	bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
+	/* Dom0 is mapped 1:1, so if pfn == mfn the page is local otherwise
+	 * is a foreign page grant-mapped in dom0. If the page is local we
+	 * can safely call the native dma_ops function, otherwise we call
+	 * the xen specific function. */
+	if (local)
+		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+	else
+		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
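
The unmap side in the same header already dispatches on local vs. foreign in the same spirit, which is the asymmetry behind the leak described in the commit message: a foreign page mapped through the native map_page would later be unmapped through the Xen-specific path, and anything the native implementation allocated would never be released. Roughly, and only as an approximation (the pfn_valid()-based test is an assumption about the code of this era, not quoted from the tree):

/* Approximate shape of the existing xen_dma_unmap_page dispatch; only the
 * native/Xen split is taken from the commit message, the exact test is an
 * assumption. */
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (pfn_valid(PFN_DOWN(handle)))	/* local dom0 page */
		__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	else					/* foreign, grant-mapped page */
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}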


@@ -43,6 +43,18 @@ static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
 	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
 }
 
+void __xen_dma_map_page(struct device *hwdev, struct page *page,
+	     dma_addr_t dev_addr, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+	if (is_device_dma_coherent(hwdev))
+		return;
+	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+		return;
+
+	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
+}
+
 void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
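
For foreign pages, __xen_dma_page_cpu_to_dev() funnels into dma_cache_maint(), which asks the hypervisor to perform the cache maintenance instead of touching the lines directly, since dom0 may not have a suitable mapping of the foreign page. A condensed sketch of that path, assuming the GNTTABOP_cache_flush interface introduced in the same series; cache_flush_one() is an invented name and the per-page loop of the real dma_cache_maint() is omitted:

/* Sketch only: cache_flush_one() does not exist under this name in the
 * kernel; it condenses what dma_cache_maint() does for a single page. */
static void cache_flush_one(unsigned long pfn, unsigned int offset,
			    unsigned int length, enum dma_data_direction dir)
{
	struct gnttab_cache_flush cflush;

	cflush.a.dev_bus_addr = (uint64_t)pfn << PAGE_SHIFT;
	cflush.offset = offset;
	cflush.length = length;
	/* clean for device-bound data, invalidate for device-written data */
	cflush.op = (dir == DMA_FROM_DEVICE) ? GNTTAB_CACHE_INVAL
					     : GNTTAB_CACHE_CLEAN;

	/* the actual cache maintenance happens in the hypervisor */
	HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
}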