/*
 * DMA mapping support for platforms lacking IOMMUs.
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
|
|
|
|
#include <linux/dma-mapping.h>
|
|
|
|
#include <linux/io.h>
|
|
|
|
|
|
|
|
/*
 * Map a single page for streaming DMA.
 *
 * Without an IOMMU the DMA address is simply the page's physical
 * address plus the offset (1:1 mapping).  Cache maintenance is
 * performed for the region unless the caller suppressed it with
 * DMA_ATTR_SKIP_CPU_SYNC.
 */
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 unsigned long attrs)
{
	dma_addr_t dma_addr = page_to_phys(page) + offset;

	WARN_ON(size == 0);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_cache_sync(dev, page_address(page) + offset, size, dir);

	return dma_addr;
}
|
|
|
|
|
|
|
|
static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
|
|
|
|
int nents, enum dma_data_direction dir,
|
2016-08-04 04:46:00 +08:00
|
|
|
unsigned long attrs)
|
2009-10-20 11:55:56 +08:00
|
|
|
{
|
|
|
|
struct scatterlist *s;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
WARN_ON(nents == 0 || sg[0].length == 0);
|
|
|
|
|
|
|
|
for_each_sg(sg, s, nents, i) {
|
|
|
|
BUG_ON(!sg_page(s));
|
|
|
|
|
2016-12-15 07:05:12 +08:00
|
|
|
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
|
|
|
|
dma_cache_sync(dev, sg_virt(s), s->length, dir);
|
2009-10-20 11:55:56 +08:00
|
|
|
|
|
|
|
s->dma_address = sg_phys(s);
|
|
|
|
s->dma_length = s->length;
|
|
|
|
}
|
|
|
|
|
|
|
|
return nents;
|
|
|
|
}
|
|
|
|
|
2009-10-27 09:35:02 +08:00
|
|
|
#ifdef CONFIG_DMA_NONCOHERENT
|
2009-10-20 11:55:56 +08:00
|
|
|
static void nommu_sync_single(struct device *dev, dma_addr_t addr,
|
|
|
|
size_t size, enum dma_data_direction dir)
|
|
|
|
{
|
|
|
|
dma_cache_sync(dev, phys_to_virt(addr), size, dir);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
|
|
|
|
int nelems, enum dma_data_direction dir)
|
|
|
|
{
|
|
|
|
struct scatterlist *s;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for_each_sg(sg, s, nelems, i)
|
|
|
|
dma_cache_sync(dev, sg_virt(s), s->length, dir);
|
|
|
|
}
|
2009-10-27 09:35:02 +08:00
|
|
|
#endif
|
2009-10-20 11:55:56 +08:00
|
|
|
|
2017-01-21 05:04:01 +08:00
|
|
|
const struct dma_map_ops nommu_dma_ops = {
|
2011-12-14 19:11:13 +08:00
|
|
|
.alloc = dma_generic_alloc_coherent,
|
|
|
|
.free = dma_generic_free_coherent,
|
2009-10-20 11:55:56 +08:00
|
|
|
.map_page = nommu_map_page,
|
|
|
|
.map_sg = nommu_map_sg,
|
2009-10-27 09:35:02 +08:00
|
|
|
#ifdef CONFIG_DMA_NONCOHERENT
|
2009-10-20 11:55:56 +08:00
|
|
|
.sync_single_for_device = nommu_sync_single,
|
|
|
|
.sync_sg_for_device = nommu_sync_sg,
|
2009-10-27 09:35:02 +08:00
|
|
|
#endif
|
2009-10-20 11:55:56 +08:00
|
|
|
.is_phys = 1,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Install nommu_dma_ops as the system DMA operations, unless a
 * platform has already registered its own set.
 */
void __init no_iommu_init(void)
{
	if (!dma_ops)
		dma_ops = &nommu_dma_ops;
}
|