mirror of https://gitee.com/openkylin/linux.git
ARM: 8561/4: dma-mapping: Fix the coherent case when iommu is used
When doing dma allocation with IOMMU the __iommu_alloc_atomic() was
used even when the system was coherent. However, this function
allocates from a non-cacheable pool, which is fine when the device is
not cache coherent but won't work as expected if the device is cache
coherent. Indeed, the CPU and device must access the memory using the
same cacheability attributes.
Moreover when the devices are coherent, the mmap call must not change
the pg_prot flags in the vma struct. The arm_coherent_iommu_mmap_attrs
has been updated in the same way that it was done for the arm_dma_mmap
in commit 55af8a9164 ("ARM: 8387/1: arm/mm/dma-mapping.c: Add arm_coherent_dma_mmap").
Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
This commit is contained in:
parent
f127089650
commit
565068221b
|
@@ -1476,13 +1476,16 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *__iommu_alloc_atomic(struct device *dev, size_t size,
|
static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
|
||||||
dma_addr_t *handle)
|
dma_addr_t *handle, int coherent_flag)
|
||||||
{
|
{
|
||||||
struct page *page;
|
struct page *page;
|
||||||
void *addr;
|
void *addr;
|
||||||
|
|
||||||
addr = __alloc_from_pool(size, &page);
|
if (coherent_flag == COHERENT)
|
||||||
|
addr = __alloc_simple_buffer(dev, size, gfp, &page);
|
||||||
|
else
|
||||||
|
addr = __alloc_from_pool(size, &page);
|
||||||
if (!addr)
|
if (!addr)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
@@ -1498,14 +1501,18 @@ static void *__iommu_alloc_atomic(struct device *dev, size_t size,
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
|
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
|
||||||
dma_addr_t handle, size_t size)
|
dma_addr_t handle, size_t size, int coherent_flag)
|
||||||
{
|
{
|
||||||
__iommu_remove_mapping(dev, handle, size);
|
__iommu_remove_mapping(dev, handle, size);
|
||||||
__free_from_pool(cpu_addr, size);
|
if (coherent_flag == COHERENT)
|
||||||
|
__dma_free_buffer(virt_to_page(cpu_addr), size);
|
||||||
|
else
|
||||||
|
__free_from_pool(cpu_addr, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||||
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
|
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs,
|
||||||
|
int coherent_flag)
|
||||||
{
|
{
|
||||||
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
|
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
|
||||||
struct page **pages;
|
struct page **pages;
|
||||||
|
@@ -1514,8 +1521,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||||
*handle = DMA_ERROR_CODE;
|
*handle = DMA_ERROR_CODE;
|
||||||
size = PAGE_ALIGN(size);
|
size = PAGE_ALIGN(size);
|
||||||
|
|
||||||
if (!gfpflags_allow_blocking(gfp))
|
if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
|
||||||
return __iommu_alloc_atomic(dev, size, handle);
|
return __iommu_alloc_simple(dev, size, gfp, handle,
|
||||||
|
coherent_flag);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Following is a work-around (a.k.a. hack) to prevent pages
|
* Following is a work-around (a.k.a. hack) to prevent pages
|
||||||
|
@@ -1526,8 +1534,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||||
*/
|
*/
|
||||||
gfp &= ~(__GFP_COMP);
|
gfp &= ~(__GFP_COMP);
|
||||||
|
|
||||||
/* For now always consider we are in a non-coherent case */
|
pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
|
||||||
pages = __iommu_alloc_buffer(dev, size, gfp, attrs, NORMAL);
|
|
||||||
if (!pages)
|
if (!pages)
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
|
@@ -1552,7 +1559,19 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
|
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||||
|
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
|
||||||
|
{
|
||||||
|
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
|
||||||
|
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
|
||||||
|
{
|
||||||
|
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
|
||||||
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
void *cpu_addr, dma_addr_t dma_addr, size_t size,
|
||||||
struct dma_attrs *attrs)
|
struct dma_attrs *attrs)
|
||||||
{
|
{
|
||||||
|
@@ -1562,8 +1581,6 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
|
||||||
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||||
unsigned long off = vma->vm_pgoff;
|
unsigned long off = vma->vm_pgoff;
|
||||||
|
|
||||||
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
|
|
||||||
|
|
||||||
if (!pages)
|
if (!pages)
|
||||||
return -ENXIO;
|
return -ENXIO;
|
||||||
|
|
||||||
|
@@ -1584,19 +1601,34 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
static int arm_iommu_mmap_attrs(struct device *dev,
|
||||||
|
struct vm_area_struct *vma, void *cpu_addr,
|
||||||
|
dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
|
||||||
|
{
|
||||||
|
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
|
||||||
|
|
||||||
|
return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int arm_coherent_iommu_mmap_attrs(struct device *dev,
|
||||||
|
struct vm_area_struct *vma, void *cpu_addr,
|
||||||
|
dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
|
||||||
|
{
|
||||||
|
return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* free a page as defined by the above mapping.
|
* free a page as defined by the above mapping.
|
||||||
* Must not be called with IRQs disabled.
|
* Must not be called with IRQs disabled.
|
||||||
*/
|
*/
|
||||||
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
||||||
dma_addr_t handle, struct dma_attrs *attrs)
|
dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag)
|
||||||
{
|
{
|
||||||
struct page **pages;
|
struct page **pages;
|
||||||
size = PAGE_ALIGN(size);
|
size = PAGE_ALIGN(size);
|
||||||
|
|
||||||
if (__in_atomic_pool(cpu_addr, size)) {
|
if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
|
||||||
__iommu_free_atomic(dev, cpu_addr, handle, size);
|
__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1615,6 +1647,18 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
|
||||||
__iommu_free_buffer(dev, pages, size, attrs);
|
__iommu_free_buffer(dev, pages, size, attrs);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void arm_iommu_free_attrs(struct device *dev, size_t size,
|
||||||
|
void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
|
||||||
|
{
|
||||||
|
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
|
||||||
|
}
|
||||||
|
|
||||||
|
void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
|
||||||
|
void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
|
||||||
|
{
|
||||||
|
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
|
||||||
|
}
|
||||||
|
|
||||||
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
|
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
|
||||||
void *cpu_addr, dma_addr_t dma_addr,
|
void *cpu_addr, dma_addr_t dma_addr,
|
||||||
size_t size, struct dma_attrs *attrs)
|
size_t size, struct dma_attrs *attrs)
|
||||||
|
@@ -2019,9 +2063,9 @@ struct dma_map_ops iommu_ops = {
|
||||||
};
|
};
|
||||||
|
|
||||||
struct dma_map_ops iommu_coherent_ops = {
|
struct dma_map_ops iommu_coherent_ops = {
|
||||||
.alloc = arm_iommu_alloc_attrs,
|
.alloc = arm_coherent_iommu_alloc_attrs,
|
||||||
.free = arm_iommu_free_attrs,
|
.free = arm_coherent_iommu_free_attrs,
|
||||||
.mmap = arm_iommu_mmap_attrs,
|
.mmap = arm_coherent_iommu_mmap_attrs,
|
||||||
.get_sgtable = arm_iommu_get_sgtable,
|
.get_sgtable = arm_iommu_get_sgtable,
|
||||||
|
|
||||||
.map_page = arm_coherent_iommu_map_page,
|
.map_page = arm_coherent_iommu_map_page,
|
||||||
|
|
Loading…
Reference in New Issue