ARC: dma [non IOC]: fix arc_dma_sync_single_for_(device|cpu)
ARC backend for dma_sync_single_for_(device|cpu) was broken as it was
not honoring the @dir argument and simply forcing it based on the call:

 - arc_dma_sync_single_for_device(dir) assumed DMA_TO_DEVICE (cache wback)
 - arc_dma_sync_single_for_cpu(dir) assumed DMA_FROM_DEVICE (cache inv)

This is not true given the DMA API programming model and has been
discussed here [1] in some detail.

Interestingly, while the deficiency has been there forever, it only
started showing up after the 4.17 dma common ops rework, commit
a8eb92d02d ("arc: fix arc_dma_{map,unmap}_page"), which wired up these
calls under the more commonly used dma_map_page API, triggering the
issue.

[1]: https://lkml.org/lkml/2018/5/18/979

Fixes: commit a8eb92d02d ("arc: fix arc_dma_{map,unmap}_page")
Cc: stable@kernel.org # v4.17+
Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
[vgupta: reworked changelog]
commit 4c612add7b
parent 6d20caed9b
@@ -129,14 +129,59 @@ int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
+/*
+ * Cache operations depending on function and direction argument, inspired by
+ * https://lkml.org/lkml/2018/5/18/979
+ * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
+ * dma-mapping: provide a generic dma-noncoherent implementation)"
+ *
+ *          |   map          ==  for_device     |   unmap     ==  for_cpu
+ *          |----------------------------------------------------------------
+ * TO_DEV   |   writeback        writeback      |   none          none
+ * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
+ * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
+ *
+ *     [*] needed for CPU speculative prefetches
+ *
+ * NOTE: we don't check the validity of direction argument as it is done in
+ * upper layer functions (in include/linux/dma-mapping.h)
+ */
+
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 		size_t size, enum dma_data_direction dir)
 {
-	dma_cache_wback(paddr, size);
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		dma_cache_wback(paddr, size);
+		break;
+
+	case DMA_FROM_DEVICE:
+		dma_cache_inv(paddr, size);
+		break;
+
+	case DMA_BIDIRECTIONAL:
+		dma_cache_wback_inv(paddr, size);
+		break;
+
+	default:
+		break;
+	}
 }
 
 void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 		size_t size, enum dma_data_direction dir)
 {
-	dma_cache_inv(paddr, size);
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		break;
+
+	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		dma_cache_inv(paddr, size);
+		break;
+
+	default:
+		break;
+	}
 }
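For context, this is roughly how the post-4.17 generic dma-noncoherent
layer ends up invoking the hooks fixed above. A simplified sketch, not
the literal upstream dispatch code (page_to_phys() comes from the arch
headers; arch_sync_dma_for_device() is declared by the dma-noncoherent
infrastructure of that era):

/*
 * Simplified sketch of the generic map-page path under the common
 * dma-noncoherent scheme; not the exact upstream implementation.
 */
static dma_addr_t sketch_noncoherent_map_page(struct device *dev,
                struct page *page, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        phys_addr_t paddr = page_to_phys(page) + offset;

        /*
         * @dir is forwarded verbatim from dma_map_page()/dma_map_single(),
         * so the arch hook must implement the per-direction cache ops
         * from the table above rather than hardcoding one of them.
         */
        arch_sync_dma_for_device(dev, paddr, size, dir);
        return (dma_addr_t)paddr;
}

This is also why the deficiency stayed hidden for so long: before the
rework, these sync hooks were only reached from the dma_sync_single_*
calls, not from the far more common dma_map_page path.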