dma mapping changes for Linux 4.16:

This pull request contains a consolidation of the generic no-IOMMU code, as
well as the glue code for swiotlb. All the code is based on the x86
implementation with hooks to allow all architectures that aren't cache
coherent to use it. The x86 conversion itself has been deferred because the
x86 maintainers were a little busy in the last months.

-----BEGIN PGP SIGNATURE-----

iQI/BAABCAApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAlpxcVoLHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYN/Lw/+Je9teM4NPQ8lU/ncbJN/bUzCFGJ6dFt2eVX/6xs3
sfl8vBdeHt6CBM02rRNecEr31z3+orjQes5JnlEJFYeG3jumV0zCPw/zbxqjzbJ1
3n6cckLxbxzy8Ca1G/BVjHLAUX5eWp1ujn/Q4d03VKVQZhJvFYlqDbP3TrNVx7xn
k86u37p/o+ngjwX66UdZ3C4iIBF8zqy6n2kkpv4HUQtHHzPwEvliN39eNilovb56
iGOzjDX1UWHAu4xCTVnPHSG4fA4XU41NWzIN3DIVPE25lYSISSl9TFAdR8GeZA0G
0Yj6sW53pRSoUwco1ocoS44/FgrPOB5/vHIL06pABvicXBiomje1QylqcK7zAczk
esjkfPEZrmZuu99GtqFyDNKEvKKdy+aBGaTZ3y+NxsuBs+0xS2Owz1IE4Tk28xaw
xh7zn+CVdk2fJh6ZIdw5Eu9b9VN08UriqDmDzO/ylDlcNGcDi7wcxiSTEkHJ1ON/
g9nletV6f3egL0wljDcOnhCJCHTvmWEeq3z8lE55QzPzSH0hHpnGQ2WD0tKrroxz
kjOZp0TdXa4F5iysOHe2xl2sftOH0zIkBQJ+oBcK12mTaLu21+yeuCggQXJ/CBdk
1Ol7l9g9T0TDuZPfiTHt5+6jmECQs92LElWA8x7uF7Fpix3BpnafWaaSMSsosF3F
D1Y=
=Nrl9
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-4.16' of git://git.infradead.org/users/hch/dma-mapping

Pull dma mapping updates from Christoph Hellwig:
 "Except for a runtime warning fix from Christian this is all about
  consolidation of the generic no-IOMMU code, as well as the glue code
  for swiotlb. All the code is based on the x86 implementation with
  hooks to allow all architectures that aren't cache coherent to use
  it.

  The x86 conversion itself has been deferred because the x86
  maintainers were a little busy in the last months"

* tag 'dma-mapping-4.16' of git://git.infradead.org/users/hch/dma-mapping: (57 commits)
  MAINTAINERS: add the iommu list for swiotlb and xen-swiotlb
  arm64: use swiotlb_alloc and swiotlb_free
  arm64: replace ZONE_DMA with ZONE_DMA32
  mips: use swiotlb_{alloc,free}
  mips/netlogic: remove swiotlb support
  tile: use generic swiotlb_ops
  tile: replace ZONE_DMA with ZONE_DMA32
  unicore32: use generic swiotlb_ops
  ia64: remove an ifdef around the content of pci-dma.c
  ia64: clean up swiotlb support
  ia64: use generic swiotlb_ops
  ia64: replace ZONE_DMA with ZONE_DMA32
  swiotlb: remove various exports
  swiotlb: refactor coherent buffer allocation
  swiotlb: refactor coherent buffer freeing
  swiotlb: wire up ->dma_supported in swiotlb_dma_ops
  swiotlb: add common swiotlb_map_ops
  swiotlb: rename swiotlb_free to swiotlb_exit
  x86: rename swiotlb_dma_ops
  powerpc: rename swiotlb_dma_ops
  ...
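For readers skimming the shortlog: the consolidated "dma-direct" path implements the DMA API for devices that sit behind no IOMMU, where a DMA address is just the physical address plus (at most) a constant bus offset, and a buffer is usable only if its last byte fits under the device's DMA mask. A minimal standalone C sketch of those two ideas follows — illustrative only; the helper names echo the kernel's phys_to_dma() and dma_capable(), but the offset and mask values here are invented:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical constant bus offset; many platforms simply use 0. */
#define BUS_DMA_OFFSET 0x0ULL

/* dma-direct: the DMA address is the physical address plus a fixed offset. */
static uint64_t phys_to_dma(uint64_t paddr)
{
	return paddr + BUS_DMA_OFFSET;
}

/* A buffer is DMA-able only if its last byte is below the device's mask. */
static bool dma_capable(uint64_t dma_mask, uint64_t addr, size_t size)
{
	return addr + size - 1 <= dma_mask;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;  /* a 32-bit-only device */
	uint64_t buf = 0xfffff000ULL;     /* physical address of some buffer */

	printf("dma addr: %#llx\n", (unsigned long long)phys_to_dma(buf));
	printf("4KiB ok:  %d\n", dma_capable(mask32, phys_to_dma(buf), 4096));
	printf("8KiB ok:  %d\n", dma_capable(mask32, phys_to_dma(buf), 8192));
	return 0;
}

The swiotlb glue covers the opposite case: when dma_capable() fails, the transfer is bounced through a pool that does satisfy the mask.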
commit 2382dc9a3e
@@ -4343,10 +4343,12 @@ T:	git git://git.infradead.org/users/hch/dma-mapping.git
 W:	http://git.infradead.org/users/hch/dma-mapping.git
 S:	Supported
 F:	lib/dma-debug.c
-F:	lib/dma-noop.c
+F:	lib/dma-direct.c
 F:	lib/dma-virt.c
 F:	drivers/base/dma-mapping.c
 F:	drivers/base/dma-coherent.c
 F:	include/asm-generic/dma-mapping.h
+F:	include/linux/dma-direct.h
 F:	include/linux/dma-mapping.h
 
 DME1737 HARDWARE MONITOR DRIVER

@@ -13071,7 +13073,7 @@ F:	arch/x86/boot/video*
 
 SWIOTLB SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	linux-kernel@vger.kernel.org
+L:	iommu@lists.linux-foundation.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
 S:	Supported
 F:	lib/swiotlb.c

@@ -15026,6 +15028,7 @@ F:	include/xen/interface/io/vscsiif.h
 XEN SWIOTLB SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:	xen-devel@lists.xenproject.org (moderated for non-subscribers)
+L:	iommu@lists.linux-foundation.org
 S:	Supported
 F:	arch/x86/xen/*swiotlb*
 F:	drivers/xen/*swiotlb*
@@ -938,6 +938,10 @@ config STRICT_MODULE_RWX
 	  and non-text memory will be made non-executable. This provides
 	  protection against certain security exploits (e.g. writing to text)
 
+# select if the architecture provides an asm/dma-direct.h header
+config ARCH_HAS_PHYS_TO_DMA
+	bool
+
 config ARCH_HAS_REFCOUNT
 	bool
 	help
@@ -209,6 +209,7 @@ config ALPHA_EIGER
 
 config ALPHA_JENSEN
 	bool "Jensen"
+	depends on BROKEN
 	help
 	  DEC PC 150 AXP (aka Jensen): This is a very old Digital system - one
 	  of the first-generation Alpha systems. A number of these systems
@@ -463,9 +463,6 @@ config ARCH_PHYS_ADDR_T_64BIT
 config ARCH_DMA_ADDR_T_64BIT
 	bool
 
-config ARC_PLAT_NEEDS_PHYS_TO_DMA
-	bool
-
 config ARC_KVADDR_SIZE
 	int "Kernel Virtual Address Space size (MB)"
 	range 0 512
@@ -11,13 +11,6 @@
 #ifndef ASM_ARC_DMA_MAPPING_H
 #define ASM_ARC_DMA_MAPPING_H
 
-#ifndef CONFIG_ARC_PLAT_NEEDS_PHYS_TO_DMA
-#define plat_dma_to_phys(dev, dma_handle) ((phys_addr_t)(dma_handle))
-#define plat_phys_to_dma(dev, paddr) ((dma_addr_t)(paddr))
-#else
-#include <plat/dma.h>
-#endif
-
 extern const struct dma_map_ops arc_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
@@ -60,7 +60,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 	/* This is linear addr (0x8000_0000 based) */
 	paddr = page_to_phys(page);
 
-	*dma_handle = plat_phys_to_dma(dev, paddr);
+	*dma_handle = paddr;
 
 	/* This is kernel Virtual address (0x7000_0000 based) */
 	if (need_kvaddr) {

@@ -92,7 +92,7 @@ static void *arc_dma_alloc(struct device *dev, size_t size,
 static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
-	phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
+	phys_addr_t paddr = dma_handle;
 	struct page *page = virt_to_page(paddr);
 	int is_non_coh = 1;
 

@@ -111,7 +111,7 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
+	unsigned long pfn = __phys_to_pfn(dma_addr);
 	unsigned long off = vma->vm_pgoff;
 	int ret = -ENXIO;
 

@@ -175,7 +175,7 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		_dma_cache_sync(paddr, size, dir);
 
-	return plat_phys_to_dma(dev, paddr);
+	return paddr;
 }
 
 /*

@@ -190,7 +190,7 @@ static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
-	phys_addr_t paddr = plat_dma_to_phys(dev, handle);
+	phys_addr_t paddr = handle;
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		_dma_cache_sync(paddr, size, dir);

@@ -224,13 +224,13 @@ static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 static void arc_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
-	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
+	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
 }
 
 static void arc_dma_sync_single_for_device(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
-	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
+	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
 }
 
 static void arc_dma_sync_sg_for_cpu(struct device *dev,
@@ -8,6 +8,7 @@ config ARM
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_SET_MEMORY
+	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
 	select ARCH_HAS_STRICT_MODULE_RWX if MMU
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST

@@ -24,7 +25,7 @@ config ARM
 	select CLONE_BACKWARDS
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
-	select DMA_NOOP_OPS if !MMU
+	select DMA_DIRECT_OPS if !MMU
 	select EDAC_SUPPORT
 	select EDAC_ATOMIC_SCRUB
 	select GENERIC_ALLOCATOR
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_ARM_DMA_DIRECT_H
+#define ASM_ARM_DMA_DIRECT_H 1
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	unsigned int offset = paddr & ~PAGE_MASK;
+	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+{
+	unsigned int offset = dev_addr & ~PAGE_MASK;
+	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	u64 limit, mask;
+
+	if (!dev->dma_mask)
+		return 0;
+
+	mask = *dev->dma_mask;
+
+	limit = (mask + 1) & ~mask;
+	if (limit && size > limit)
+		return 0;
+
+	if ((addr | (addr + size - 1)) & ~mask)
+		return 0;
+
+	return 1;
+}
+
+#endif /* ASM_ARM_DMA_DIRECT_H */
@@ -18,7 +18,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_noop_ops;
+	return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_direct_ops;
 }
 
 #ifdef __arch_page_to_dma

@@ -109,39 +109,6 @@ static inline bool is_device_dma_coherent(struct device *dev)
 	return dev->archdata.dma_coherent;
 }
 
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	unsigned int offset = paddr & ~PAGE_MASK;
-	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
-	unsigned int offset = dev_addr & ~PAGE_MASK;
-	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-	u64 limit, mask;
-
-	if (!dev->dma_mask)
-		return 0;
-
-	mask = *dev->dma_mask;
-
-	limit = (mask + 1) & ~mask;
-	if (limit && size > limit)
-		return 0;
-
-	if ((addr | (addr + size - 1)) & ~mask)
-		return 0;
-
-	return 1;
-}
-
 static inline void dma_mark_clean(void *addr, size_t size) { }
 
 /**
  * arm_dma_alloc - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -11,7 +11,7 @@
 
 #include <linux/export.h>
 #include <linux/mm.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/scatterlist.h>
 
 #include <asm/cachetype.h>

@@ -22,7 +22,7 @@
 #include "dma.h"
 
 /*
- * dma_noop_ops is used if
+ * dma_direct_ops is used if
  *  - MMU/MPU is off
  *  - cpu is v7m w/o cache support
  *  - device is coherent

@@ -39,7 +39,6 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
 				 unsigned long attrs)
 
 {
-	const struct dma_map_ops *ops = &dma_noop_ops;
 	void *ret;
 
 	/*

@@ -48,7 +47,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
 	 */
 
 	if (attrs & DMA_ATTR_NON_CONSISTENT)
-		return ops->alloc(dev, size, dma_handle, gfp, attrs);
+		return dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
 
 	ret = dma_alloc_from_global_coherent(size, dma_handle);
 

@@ -70,10 +69,8 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
 			       void *cpu_addr, dma_addr_t dma_addr,
 			       unsigned long attrs)
 {
-	const struct dma_map_ops *ops = &dma_noop_ops;
-
 	if (attrs & DMA_ATTR_NON_CONSISTENT) {
-		ops->free(dev, size, cpu_addr, dma_addr, attrs);
+		dma_direct_free(dev, size, cpu_addr, dma_addr, attrs);
 	} else {
 		int ret = dma_release_from_global_coherent(get_order(size),
 							   cpu_addr);

@@ -213,7 +210,7 @@ EXPORT_SYMBOL(arm_nommu_dma_ops);
 
 static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
 {
-	return coherent ? &dma_noop_ops : &arm_nommu_dma_ops;
+	return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
@@ -59,6 +59,7 @@ config ARM64
 	select COMMON_CLK
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS
+	select DMA_DIRECT_OPS
 	select EDAC_SUPPORT
 	select FRAME_POINTER
 	select GENERIC_ALLOCATOR

@@ -227,7 +228,7 @@ config GENERIC_CSUM
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
 
-config ZONE_DMA
+config ZONE_DMA32
 	def_bool y
 
 config HAVE_GENERIC_GUP
@@ -50,40 +50,5 @@ static inline bool is_device_dma_coherent(struct device *dev)
 	return dev->archdata.dma_coherent;
 }
 
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	dma_addr_t dev_addr = (dma_addr_t)paddr;
-
-	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
-{
-	phys_addr_t paddr = (phys_addr_t)dev_addr;
-
-	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-	if (!dev->dma_mask)
-		return false;
-
-	return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline void dma_mark_clean(void *addr, size_t size)
-{
-}
-
-/* Override for dma_max_pfn() */
-static inline unsigned long dma_max_pfn(struct device *dev)
-{
-	dma_addr_t dma_max = (dma_addr_t)*dev->dma_mask;
-
-	return (ulong)dma_to_phys(dev, dma_max) >> PAGE_SHIFT;
-}
-#define dma_max_pfn(dev) dma_max_pfn(dev)
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_DMA_MAPPING_H */
@@ -24,7 +24,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-contiguous.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>

@@ -91,46 +91,6 @@ static int __free_from_pool(void *start, size_t size)
 	return 1;
 }
 
-static void *__dma_alloc_coherent(struct device *dev, size_t size,
-				  dma_addr_t *dma_handle, gfp_t flags,
-				  unsigned long attrs)
-{
-	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-		flags |= GFP_DMA;
-	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
-		struct page *page;
-		void *addr;
-
-		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-						 get_order(size), flags);
-		if (!page)
-			return NULL;
-
-		*dma_handle = phys_to_dma(dev, page_to_phys(page));
-		addr = page_address(page);
-		memset(addr, 0, size);
-		return addr;
-	} else {
-		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
-	}
-}
-
-static void __dma_free_coherent(struct device *dev, size_t size,
-				void *vaddr, dma_addr_t dma_handle,
-				unsigned long attrs)
-{
-	bool freed;
-	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-	freed = dma_release_from_contiguous(dev,
-					phys_to_page(paddr),
-					size >> PAGE_SHIFT);
-	if (!freed)
-		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
 static void *__dma_alloc(struct device *dev, size_t size,
 			 dma_addr_t *dma_handle, gfp_t flags,
 			 unsigned long attrs)

@@ -152,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 		return addr;
 	}
 
-	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
 	if (!ptr)
 		goto no_mem;
 

@@ -173,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 	return coherent_ptr;
 
 no_map:
-	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+	swiotlb_free(dev, size, ptr, *dma_handle, attrs);
 no_mem:
 	return NULL;
 }

@@ -191,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
 			return;
 		vunmap(vaddr);
 	}
-	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+	swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,

@@ -368,7 +328,7 @@ static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
 	return 0;
 }
 
-static const struct dma_map_ops swiotlb_dma_ops = {
+static const struct dma_map_ops arm64_swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
 	.mmap = __swiotlb_mmap,

@@ -397,7 +357,7 @@ static int __init atomic_pool_init(void)
 		page = dma_alloc_from_contiguous(NULL, nr_pages,
 						 pool_size_order, GFP_KERNEL);
 	else
-		page = alloc_pages(GFP_DMA, pool_size_order);
+		page = alloc_pages(GFP_DMA32, pool_size_order);
 
 	if (page) {
 		int ret;

@@ -923,7 +883,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
 	if (!dev->dma_ops)
-		dev->dma_ops = &swiotlb_dma_ops;
+		dev->dma_ops = &arm64_swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
@@ -217,7 +217,7 @@ static void __init reserve_elfcorehdr(void)
 }
 #endif /* CONFIG_CRASH_DUMP */
 /*
- * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
  * currently assumes that for memory starting above 4G, 32-bit devices will
  * use a DMA offset.
  */

@@ -233,8 +233,8 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES]  = {0};
 
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
-		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
+		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
 	max_zone_pfns[ZONE_NORMAL] = max;
 
 	free_area_init_nodes(max_zone_pfns);

@@ -251,9 +251,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	memset(zone_size, 0, sizeof(zone_size));
 
 	/* 4GB maximum for 32-bit only capable devices */
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	max_dma = PFN_DOWN(arm64_dma_phys_limit);
-	zone_size[ZONE_DMA] = max_dma - min;
+	zone_size[ZONE_DMA32] = max_dma - min;
 #endif
 	zone_size[ZONE_NORMAL] = max - max_dma;
 

@@ -266,10 +266,10 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 		if (start >= max)
 			continue;
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 		if (start < max_dma) {
 			unsigned long dma_end = min(end, max_dma);
-			zhole_size[ZONE_DMA] -= dma_end - start;
+			zhole_size[ZONE_DMA32] -= dma_end - start;
 		}
 #endif
 		if (end > max_dma) {

@@ -470,7 +470,7 @@ void __init arm64_memblock_init(void)
 	early_init_fdt_scan_reserved_mem();
 
 	/* 4GB maximum for 32-bit only capable devices */
-	if (IS_ENABLED(CONFIG_ZONE_DMA))
+	if (IS_ENABLED(CONFIG_ZONE_DMA32))
 		arm64_dma_phys_limit = max_zone_dma_phys();
 	else
 		arm64_dma_phys_limit = PHYS_MASK + 1;
@@ -33,6 +33,9 @@ config GENERIC_CALIBRATE_DELAY
 config NO_IOPORT_MAP
 	def_bool y if !PCI
 
+config NO_DMA
+	def_bool y if !PCI
+
 config FORCE_MAX_ZONEORDER
 	int
 	default 6

@@ -72,6 +75,7 @@ config CRIS
 	select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32
 	select HAVE_DEBUG_BUGVERBOSE if ETRAX_ARCH_V32
 	select HAVE_NMI
+	select DMA_DIRECT_OPS if PCI
 
 config HZ
 	int
@@ -2,4 +2,4 @@
 # Makefile for Etrax cardbus driver
 #
 
-obj-$(CONFIG_ETRAX_CARDBUS) += bios.o dma.o
+obj-$(CONFIG_ETRAX_CARDBUS) += bios.o
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Dynamic DMA mapping support.
- *
- * On cris there is no hardware dynamic DMA address translation,
- * so consistent alloc/free are merely page allocation/freeing.
- * The rest of the dynamic DMA mapping interface is implemented
- * in asm/pci.h.
- *
- * Borrowed from i386.
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <asm/io.h>
-
-static void *v32_dma_alloc(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-	void *ret;
-
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-		gfp |= GFP_DMA;
-
-	ret = (void *)__get_free_pages(gfp, get_order(size));
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_phys(ret);
-	}
-	return ret;
-}
-
-static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
-		dma_addr_t dma_handle, unsigned long attrs)
-{
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static inline dma_addr_t v32_dma_map_page(struct device *dev,
-		struct page *page, unsigned long offset, size_t size,
-		enum dma_data_direction direction, unsigned long attrs)
-{
-	return page_to_phys(page) + offset;
-}
-
-static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction direction,
-		unsigned long attrs)
-{
-	printk("Map sg\n");
-	return nents;
-}
-
-static inline int v32_dma_supported(struct device *dev, u64 mask)
-{
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if (mask < 0x00ffffff)
-		return 0;
-	return 1;
-}
-
-const struct dma_map_ops v32_dma_ops = {
-	.alloc		= v32_dma_alloc,
-	.free		= v32_dma_free,
-	.map_page	= v32_dma_map_page,
-	.map_sg		= v32_dma_map_sg,
-	.dma_supported	= v32_dma_supported,
-};
-EXPORT_SYMBOL(v32_dma_ops);
@@ -5,6 +5,7 @@ generic-y += cmpxchg.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += extable.h
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_CRIS_DMA_MAPPING_H
-#define _ASM_CRIS_DMA_MAPPING_H
-
-#ifdef CONFIG_PCI
-extern const struct dma_map_ops v32_dma_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	return &v32_dma_ops;
-}
-#else
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	BUG();
-	return NULL;
-}
-#endif
-
-#endif
@@ -23,6 +23,7 @@ config H8300
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_HASH
 	select CPU_NO_EFFICIENT_FFS
+	select DMA_DIRECT_OPS
 
 config CPU_BIG_ENDIAN
 	def_bool y
@@ -9,6 +9,7 @@ generic-y += delay.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += dma.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += exec.h
 generic-y += extable.h
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _H8300_DMA_MAPPING_H
-#define _H8300_DMA_MAPPING_H
-
-extern const struct dma_map_ops h8300_dma_map_ops;
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	return &h8300_dma_map_ops;
-}
-
-#endif
@@ -7,7 +7,7 @@ extra-y := vmlinux.lds
 
 obj-y := process.o traps.o ptrace.o \
 	 signal.o setup.o syscalls.o \
-	 irq.o entry.o dma.o
+	 irq.o entry.o
 
 obj-$(CONFIG_ROMKERNEL) += head_rom.o
 obj-$(CONFIG_RAMKERNEL) += head_ram.o
@@ -1,69 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file COPYING in the main directory of this archive
- * for more details.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/scatterlist.h>
-#include <linux/module.h>
-#include <asm/pgalloc.h>
-
-static void *dma_alloc(struct device *dev, size_t size,
-		       dma_addr_t *dma_handle, gfp_t gfp,
-		       unsigned long attrs)
-{
-	void *ret;
-
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
-	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
-		gfp |= GFP_DMA;
-	ret = (void *)__get_free_pages(gfp, get_order(size));
-
-	if (ret != NULL) {
-		memset(ret, 0, size);
-		*dma_handle = virt_to_phys(ret);
-	}
-	return ret;
-}
-
-static void dma_free(struct device *dev, size_t size,
-		     void *vaddr, dma_addr_t dma_handle,
-		     unsigned long attrs)
-
-{
-	free_pages((unsigned long)vaddr, get_order(size));
-}
-
-static dma_addr_t map_page(struct device *dev, struct page *page,
-				  unsigned long offset, size_t size,
-				  enum dma_data_direction direction,
-				  unsigned long attrs)
-{
-	return page_to_phys(page) + offset;
-}
-
-static int map_sg(struct device *dev, struct scatterlist *sgl,
-		  int nents, enum dma_data_direction direction,
-		  unsigned long attrs)
-{
-	struct scatterlist *sg;
-	int i;
-
-	for_each_sg(sgl, sg, nents, i) {
-		sg->dma_address = sg_phys(sg);
-	}
-
-	return nents;
-}
-
-const struct dma_map_ops h8300_dma_map_ops = {
-	.alloc = dma_alloc,
-	.free = dma_free,
-	.map_page = map_page,
-	.map_sg = map_sg,
-};
-EXPORT_SYMBOL(h8300_dma_map_ops);
@@ -37,11 +37,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return dma_ops;
 }
 
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-	if (!dev->dma_mask)
-		return 0;
-	return addr + size - 1 <= *dev->dma_mask;
-}
-
 #endif
@@ -330,8 +330,6 @@ static inline void outsl(unsigned long port, const void *buffer, int count)
 	}
 }
 
-#define flush_write_buffers() do { } while (0)
-
 #endif /* __KERNEL__ */
 
 #endif
@@ -19,6 +19,7 @@
  */
 
 #include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/bootmem.h>
 #include <linux/genalloc.h>
 #include <asm/dma-mapping.h>
@@ -33,6 +33,7 @@ config IA64
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_VIRT_CPU_ACCOUNTING
+	select ARCH_HAS_DMA_MARK_CLEAN
 	select ARCH_HAS_SG_CHAIN
 	select VIRT_TO_BUS
 	select ARCH_DISCARD_MEMBLOCK

@@ -65,7 +66,7 @@ config 64BIT
 	select ATA_NONSTANDARD if ATA
 	default y
 
-config ZONE_DMA
+config ZONE_DMA32
 	def_bool y
 	depends on !IA64_SGI_SN2
 

@@ -145,6 +146,7 @@ config IA64_GENERIC
 	bool "generic"
 	select NUMA
 	select ACPI_NUMA
+	select DMA_DIRECT_OPS
 	select SWIOTLB
 	select PCI_MSI
 	help

@@ -165,6 +167,7 @@ config IA64_GENERIC
 
 config IA64_DIG
 	bool "DIG-compliant"
+	select DMA_DIRECT_OPS
 	select SWIOTLB
 
 config IA64_DIG_VTD

@@ -180,6 +183,7 @@ config IA64_HP_ZX1
 
 config IA64_HP_ZX1_SWIOTLB
 	bool "HP-zx1/sx1000 with software I/O TLB"
+	select DMA_DIRECT_OPS
 	select SWIOTLB
 	help
 	  Build a kernel that runs on HP zx1 and sx1000 systems even when they

@@ -203,6 +207,7 @@ config IA64_SGI_UV
 	bool "SGI-UV"
 	select NUMA
 	select ACPI_NUMA
+	select DMA_DIRECT_OPS
 	select SWIOTLB
 	help
 	  Selecting this option will optimize the kernel for use on UV based

@@ -213,6 +218,7 @@ config IA64_SGI_UV
 
 config IA64_HP_SIM
 	bool "Ski-simulator"
+	select DMA_DIRECT_OPS
 	select SWIOTLB
 	depends on !PM
 
@@ -19,7 +19,7 @@
 #include <linux/export.h>
 #include <asm/machvec.h>
 
-extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
+extern const struct dma_map_ops sba_dma_ops;
 
 /* swiotlb declarations & definitions: */
 extern int swiotlb_late_init_with_default_size (size_t size);
@@ -8,7 +8,6 @@
  */
 #include <asm/machvec.h>
 #include <linux/scatterlist.h>
-#include <asm/swiotlb.h>
 #include <linux/dma-debug.h>
 
 #define ARCH_HAS_DMA_GET_REQUIRED_MASK

@@ -27,22 +26,4 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return platform_dma_get_ops(NULL);
 }
 
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-	if (!dev->dma_mask)
-		return 0;
-
-	return addr + size - 1 <= *dev->dma_mask;
-}
-
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	return paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
-	return daddr;
-}
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
@@ -20,6 +20,4 @@ extern unsigned long MAX_DMA_ADDRESS;
 
 #define free_dma(x)
 
-void dma_mark_clean(void *addr, size_t size);
-
 #endif /* _ASM_IA64_DMA_H */
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef ASM_IA64__SWIOTLB_H
-#define ASM_IA64__SWIOTLB_H
-
-#include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
-
-#ifdef CONFIG_SWIOTLB
-extern int swiotlb;
-extern void pci_swiotlb_init(void);
-#else
-#define swiotlb 0
-static inline void pci_swiotlb_init(void)
-{
-}
-#endif
-
-#endif /* ASM_IA64__SWIOTLB_H */
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
 #include <linux/export.h>
 
 /* Set this to 1 if there is a HW IOMMU in the system */

@@ -23,3 +24,11 @@ const struct dma_map_ops *dma_get_ops(struct device *dev)
 	return dma_ops;
 }
 EXPORT_SYMBOL(dma_get_ops);
+
+#ifdef CONFIG_SWIOTLB
+void __init swiotlb_dma_init(void)
+{
+	dma_ops = &swiotlb_dma_ops;
+	swiotlb_init(1);
+}
+#endif
@@ -12,12 +12,7 @@
 #include <asm/iommu.h>
 #include <asm/machvec.h>
 #include <linux/dma-mapping.h>
-
-
-#ifdef CONFIG_INTEL_IOMMU
-
 #include <linux/kernel.h>
-
 #include <asm/page.h>
 
 dma_addr_t bad_dma_address __read_mostly;

@@ -104,8 +99,14 @@ void __init pci_iommu_alloc(void)
 	detect_intel_iommu();
 
 #ifdef CONFIG_SWIOTLB
-	pci_swiotlb_init();
-#endif
+	if (!iommu_detected) {
+#ifdef CONFIG_IA64_GENERIC
+		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
+		machvec_init("dig");
+		swiotlb_dma_init();
+#else
+		panic("Unable to find Intel IOMMU");
+#endif /* CONFIG_IA64_GENERIC */
+	}
+#endif /* CONFIG_SWIOTLB */
 }
-
-#endif
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Glue code to lib/swiotlb.c */
-
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/cache.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/swiotlb.h>
-#include <asm/dma.h>
-#include <asm/iommu.h>
-#include <asm/machvec.h>
-
-int swiotlb __read_mostly;
-EXPORT_SYMBOL(swiotlb);
-
-static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
-					 dma_addr_t *dma_handle, gfp_t gfp,
-					 unsigned long attrs)
-{
-	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
-		gfp |= GFP_DMA;
-	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-}
-
-static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
-				       void *vaddr, dma_addr_t dma_addr,
-				       unsigned long attrs)
-{
-	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
-}
-
-const struct dma_map_ops swiotlb_dma_ops = {
-	.alloc = ia64_swiotlb_alloc_coherent,
-	.free = ia64_swiotlb_free_coherent,
-	.map_page = swiotlb_map_page,
-	.unmap_page = swiotlb_unmap_page,
-	.map_sg = swiotlb_map_sg_attrs,
-	.unmap_sg = swiotlb_unmap_sg_attrs,
-	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-	.sync_single_for_device = swiotlb_sync_single_for_device,
-	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.dma_supported = swiotlb_dma_supported,
-	.mapping_error = swiotlb_dma_mapping_error,
-};
-
-void __init swiotlb_dma_init(void)
-{
-	dma_ops = &swiotlb_dma_ops;
-	swiotlb_init(1);
-}
-
-void __init pci_swiotlb_init(void)
-{
-	if (!iommu_detected) {
-#ifdef CONFIG_IA64_GENERIC
-		swiotlb = 1;
-		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
-		machvec_init("dig");
-		swiotlb_init(1);
-		dma_ops = &swiotlb_dma_ops;
-#else
-		panic("Unable to find Intel IOMMU");
-#endif
-	}
-}
@@ -237,9 +237,9 @@ paging_init (void)
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-	max_zone_pfns[ZONE_DMA] = max_dma;
+	max_zone_pfns[ZONE_DMA32] = max_dma;
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
@@ -38,7 +38,7 @@ struct early_node_data {
 	struct ia64_node_data *node_data;
 	unsigned long pernode_addr;
 	unsigned long pernode_size;
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	unsigned long num_dma_physpages;
 #endif
 	unsigned long min_pfn;

@@ -669,7 +669,7 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 {
 	unsigned long end = start + len;
 
-#ifdef CONFIG_ZONE_DMA
+#ifdef CONFIG_ZONE_DMA32
 	if (start <= __pa(MAX_DMA_ADDRESS))
 		mem_data[node].num_dma_physpages +=
 			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;

@@ -724,8 +724,8 @@ void __init paging_init(void)
 	}
 
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
-	max_zone_pfns[ZONE_DMA] = max_dma;
+#ifdef CONFIG_ZONE_DMA32
+	max_zone_pfns[ZONE_DMA32] = max_dma;
 #endif
 	max_zone_pfns[ZONE_NORMAL] = max_pfn;
 	free_area_init_nodes(max_zone_pfns);
@@ -19,7 +19,7 @@ config M32R
 	select MODULES_USE_ELF_RELA
 	select HAVE_DEBUG_STACKOVERFLOW
 	select CPU_NO_EFFICIENT_FFS
-	select DMA_NOOP_OPS
+	select DMA_DIRECT_OPS
 	select ARCH_NO_COHERENT_DMA_MMAP if !MMU
 
 config SBUS
@@ -1,5 +1,6 @@
 generic-y += clkdev.h
 generic-y += current.h
+generic-y += dma-mapping.h
 generic-y += exec.h
 generic-y += extable.h
 generic-y += irq_work.h
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M32R_DMA_MAPPING_H
-#define _ASM_M32R_DMA_MAPPING_H
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-debug.h>
-#include <linux/io.h>
-
-static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	return &dma_noop_ops;
-}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-	if (!dev->dma_mask)
-		return false;
-	return addr + size - 1 <= *dev->dma_mask;
-}
-
-#endif /* _ASM_M32R_DMA_MAPPING_H */
@@ -191,8 +191,6 @@ static inline void _writel(unsigned long l, unsigned long addr)
 
 #define mmiowb()
 
-#define flush_write_buffers() do { } while (0)  /* M32R_FIXME */
-
 static inline void
 memset_io(volatile void __iomem *addr, unsigned char val, int count)
 {
@@ -76,8 +76,6 @@ static void *m68k_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	void *ret;
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
 
 	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
 		gfp |= GFP_DMA;
@@ -18,11 +18,11 @@
 /*
  * Available generic sets of operations
  */
-extern const struct dma_map_ops dma_direct_ops;
+extern const struct dma_map_ops dma_nommu_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	return &dma_direct_ops;
+	return &dma_nommu_ops;
 }
 
 #endif /* _ASM_MICROBLAZE_DMA_MAPPING_H */
@@ -15,42 +15,18 @@
 #include <linux/bug.h>
 #include <asm/cacheflush.h>
 
-#define NOT_COHERENT_CACHE
-
-static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
 				       dma_addr_t *dma_handle, gfp_t flag,
 				       unsigned long attrs)
 {
-#ifdef NOT_COHERENT_CACHE
 	return consistent_alloc(flag, size, dma_handle);
-#else
-	void *ret;
-	struct page *page;
-	int node = dev_to_node(dev);
-
-	/* ignore region specifiers */
-	flag &= ~(__GFP_HIGHMEM);
-
-	page = alloc_pages_node(node, flag, get_order(size));
-	if (page == NULL)
-		return NULL;
-	ret = page_address(page);
-	memset(ret, 0, size);
-	*dma_handle = virt_to_phys(ret);
-
-	return ret;
-#endif
 }
 
-static void dma_direct_free_coherent(struct device *dev, size_t size,
+static void dma_nommu_free_coherent(struct device *dev, size_t size,
 				     void *vaddr, dma_addr_t dma_handle,
 				     unsigned long attrs)
 {
-#ifdef NOT_COHERENT_CACHE
 	consistent_free(size, vaddr);
-#else
-	free_pages((unsigned long)vaddr, get_order(size));
-#endif
 }
 
 static inline void __dma_sync(unsigned long paddr,

@@ -69,7 +45,7 @@ static inline void __dma_sync(unsigned long paddr,
 	}
 }
 
-static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
 			     int nents, enum dma_data_direction direction,
 			     unsigned long attrs)
 {

@@ -89,12 +65,7 @@ static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
 
-static int dma_direct_dma_supported(struct device *dev, u64 mask)
-{
-	return 1;
-}
-
-static inline dma_addr_t dma_direct_map_page(struct device *dev,
+static inline dma_addr_t dma_nommu_map_page(struct device *dev,
 					     struct page *page,
 					     unsigned long offset,
 					     size_t size,

@@ -106,7 +77,7 @@ static inline dma_addr_t dma_nommu_map_page(struct device *dev,
 	return page_to_phys(page) + offset;
 }
 
-static inline void dma_direct_unmap_page(struct device *dev,
+static inline void dma_nommu_unmap_page(struct device *dev,
 					 dma_addr_t dma_address,
 					 size_t size,
 					 enum dma_data_direction direction,

@@ -122,7 +93,7 @@ static inline void dma_nommu_unmap_page(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_single_for_cpu(struct device *dev,
+dma_nommu_sync_single_for_cpu(struct device *dev,
 			       dma_addr_t dma_handle, size_t size,
 			       enum dma_data_direction direction)
 {

@@ -136,7 +107,7 @@ dma_nommu_sync_single_for_cpu(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_single_for_device(struct device *dev,
+dma_nommu_sync_single_for_device(struct device *dev,
 				  dma_addr_t dma_handle, size_t size,
 				  enum dma_data_direction direction)
 {

@@ -150,7 +121,7 @@ dma_nommu_sync_single_for_device(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_sg_for_cpu(struct device *dev,
+dma_nommu_sync_sg_for_cpu(struct device *dev,
 			   struct scatterlist *sgl, int nents,
 			   enum dma_data_direction direction)
 {

@@ -164,7 +135,7 @@ dma_nommu_sync_sg_for_cpu(struct device *dev,
 }
 
 static inline void
-dma_direct_sync_sg_for_device(struct device *dev,
+dma_nommu_sync_sg_for_device(struct device *dev,
 			      struct scatterlist *sgl, int nents,
 			      enum dma_data_direction direction)
 {

@@ -178,7 +149,7 @@ dma_nommu_sync_sg_for_device(struct device *dev,
 }
 
 static
-int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 			     void *cpu_addr, dma_addr_t handle, size_t size,
 			     unsigned long attrs)
 {

@@ -191,12 +162,8 @@ int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 	if (off >= count || user_count > (count - off))
 		return -ENXIO;
 
-#ifdef NOT_COHERENT_CACHE
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 	pfn = consistent_virt_to_pfn(cpu_addr);
-#else
-	pfn = virt_to_pfn(cpu_addr);
-#endif
 	return remap_pfn_range(vma, vma->vm_start, pfn + off,
 			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
 #else

@@ -204,20 +171,19 @@ int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 #endif
 }
 
-const struct dma_map_ops dma_direct_ops = {
-	.alloc = dma_direct_alloc_coherent,
-	.free = dma_direct_free_coherent,
-	.mmap = dma_direct_mmap_coherent,
-	.map_sg = dma_direct_map_sg,
-	.dma_supported = dma_direct_dma_supported,
-	.map_page = dma_direct_map_page,
-	.unmap_page = dma_direct_unmap_page,
-	.sync_single_for_cpu = dma_direct_sync_single_for_cpu,
-	.sync_single_for_device = dma_direct_sync_single_for_device,
-	.sync_sg_for_cpu = dma_direct_sync_sg_for_cpu,
-	.sync_sg_for_device = dma_direct_sync_sg_for_device,
+const struct dma_map_ops dma_nommu_ops = {
+	.alloc = dma_nommu_alloc_coherent,
+	.free = dma_nommu_free_coherent,
+	.mmap = dma_nommu_mmap_coherent,
+	.map_sg = dma_nommu_map_sg,
+	.map_page = dma_nommu_map_page,
+	.unmap_page = dma_nommu_unmap_page,
+	.sync_single_for_cpu = dma_nommu_sync_single_for_cpu,
+	.sync_single_for_device = dma_nommu_sync_single_for_device,
+	.sync_sg_for_cpu = dma_nommu_sync_sg_for_cpu,
+	.sync_sg_for_device = dma_nommu_sync_sg_for_device,
 };
-EXPORT_SYMBOL(dma_direct_ops);
+EXPORT_SYMBOL(dma_nommu_ops);
 
 /* Number of entries preallocated for DMA-API debugging */
 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
@@ -431,6 +431,7 @@ config MACH_LOONGSON32
 
 config MACH_LOONGSON64
 	bool "Loongson-2/3 family of machines"
+	select ARCH_HAS_PHYS_TO_DMA
 	select SYS_SUPPORTS_ZBOOT
 	help
 	  This enables the support of Loongson-2/3 family of machines.

@@ -880,6 +881,7 @@ config MIKROTIK_RB532
 config CAVIUM_OCTEON_SOC
 	bool "Cavium Networks Octeon SoC based boards"
 	select CEVT_R4K
+	select ARCH_HAS_PHYS_TO_DMA
 	select ARCH_PHYS_ADDR_T_64BIT
 	select DMA_COHERENT
 	select SYS_SUPPORTS_64BIT_KERNEL
@@ -75,6 +75,7 @@ config NEED_SG_DMA_LENGTH
 
 config SWIOTLB
 	def_bool y
+	select DMA_DIRECT_OPS
 	select IOMMU_HELPER
 	select NEED_SG_DMA_LENGTH
 
@@ -159,36 +159,13 @@ static void octeon_dma_sync_sg_for_device(struct device *dev,
 static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
 	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-	void *ret;
-
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
-	if (IS_ENABLED(CONFIG_ZONE_DMA) && dev == NULL)
-		gfp |= __GFP_DMA;
-	else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-		 dev->coherent_dma_mask <= DMA_BIT_MASK(24))
-		gfp |= __GFP_DMA;
-	else if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-		 dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-		gfp |= __GFP_DMA32;
-
-	/* Don't invoke OOM killer */
-	gfp |= __GFP_NORETRY;
-
-	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+	void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
 
 	mb();
 
 	return ret;
 }
 
-static void octeon_dma_free_coherent(struct device *dev, size_t size,
-	void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
-{
-	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
 static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
 	return paddr;

@@ -228,7 +205,7 @@ EXPORT_SYMBOL(dma_to_phys);
 static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
 	.dma_map_ops = {
 		.alloc = octeon_dma_alloc_coherent,
-		.free = octeon_dma_free_coherent,
+		.free = swiotlb_free,
 		.map_page = octeon_dma_map_page,
 		.unmap_page = swiotlb_unmap_page,
 		.map_sg = octeon_dma_map_sg,

@@ -314,7 +291,7 @@ void __init plat_swiotlb_setup(void)
 static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
 	.dma_map_ops = {
 		.alloc = octeon_dma_alloc_coherent,
-		.free = octeon_dma_free_coherent,
+		.free = swiotlb_free,
 		.map_page = octeon_dma_map_page,
 		.unmap_page = swiotlb_unmap_page,
 		.map_sg = octeon_dma_map_sg,
@@ -0,0 +1 @@
+#include <asm/dma-coherence.h>
@@ -17,16 +17,6 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 	return mips_dma_map_ops;
 }
 
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
-	if (!dev->dma_mask)
-		return false;
-
-	return addr + size <= *dev->dma_mask;
-}
-
 static inline void dma_mark_clean(void *addr, size_t size) {}
 
 #define arch_setup_dma_ops arch_setup_dma_ops
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
 				      u64 size, const struct iommu_ops *iommu,
@@ -61,6 +61,14 @@ static inline void plat_post_dma_flush(struct device *dev)
 {
 }
 
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	if (!dev->dma_mask)
+		return false;
+
+	return addr + size - 1 <= *dev->dma_mask;
+}
+
 dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
 phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
 
@@ -70,16 +70,4 @@ static inline void plat_post_dma_flush(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_SWIOTLB
-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
-{
-	return paddr;
-}
-
-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
-{
-	return daddr;
-}
-#endif
-
 #endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */
@@ -17,6 +17,14 @@
 
 struct device;
 
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+	if (!dev->dma_mask)
+		return false;
+
+	return addr + size - 1 <= *dev->dma_mask;
+}
+
 extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
 extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
 static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
@@ -87,9 +87,6 @@ unsigned int nlm_get_cpu_frequency(void);
 extern const struct plat_smp_ops nlm_smp_ops;
 extern char nlm_reset_entry[], nlm_reset_entry_end[];
 
-/* SWIOTLB */
-extern const struct dma_map_ops nlm_swiotlb_dma_ops;
-
 extern unsigned int nlm_threads_per_core;
 extern cpumask_t nlm_cpumask;
 
@@ -136,6 +136,7 @@ config SWIOTLB
 	bool "Soft IOMMU Support for All-Memory DMA"
 	default y
 	depends on CPU_LOONGSON3
+	select DMA_DIRECT_OPS
 	select IOMMU_HELPER
 	select NEED_SG_DMA_LENGTH
 	select NEED_DMA_MAP_STATE
@@ -13,32 +13,12 @@
 static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-	void *ret;
+	void *ret = swiotlb_alloc(dev, size, dma_handle, gfp, attrs);
 
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
-	if ((IS_ENABLED(CONFIG_ISA) && dev == NULL) ||
-	    (IS_ENABLED(CONFIG_ZONE_DMA) &&
-	     dev->coherent_dma_mask < DMA_BIT_MASK(32)))
-		gfp |= __GFP_DMA;
-	else if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-		 dev->coherent_dma_mask < DMA_BIT_MASK(40))
-		gfp |= __GFP_DMA32;
-
-	gfp |= __GFP_NORETRY;
-
-	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
 	mb();
 	return ret;
 }
 
-static void loongson_dma_free_coherent(struct device *dev, size_t size,
-		void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
-{
-	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
 static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
 				unsigned long offset, size_t size,
 				enum dma_data_direction dir,

@@ -109,7 +89,7 @@ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 
 static const struct dma_map_ops loongson_dma_map_ops = {
 	.alloc = loongson_dma_alloc_coherent,
-	.free = loongson_dma_free_coherent,
+	.free = swiotlb_free,
 	.map_page = loongson_dma_map_page,
 	.unmap_page = swiotlb_unmap_page,
 	.map_sg = loongson_dma_map_sg,
@@ -93,9 +93,6 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 {
 	gfp_t dma_flag;
 
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
 #ifdef CONFIG_ISA
 	if (dev == NULL)
 		dma_flag = __GFP_DMA;
@@ -89,9 +89,4 @@ config IOMMU_HELPER
 config NEED_SG_DMA_LENGTH
 	bool
 
-config SWIOTLB
-	def_bool y
-	select NEED_SG_DMA_LENGTH
-	select IOMMU_HELPER
-
 endif
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-y += irq.o time.o
-obj-y += nlm-dma.o
 obj-y += reset.o
 obj-$(CONFIG_SMP) += smp.o smpboot.o
 obj-$(CONFIG_EARLY_PRINTK) += earlycons.o
@@ -1,97 +0,0 @@
-/*
- * Copyright (C) 2003-2013 Broadcom Corporation
- * All Rights Reserved
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the Broadcom
- * license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
- * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
- * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/bootmem.h>
-#include <linux/export.h>
-#include <linux/swiotlb.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-
-#include <asm/bootinfo.h>
-
-static char *nlm_swiotlb;
-
-static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
-{
-	/* ignore region specifiers */
-	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
-
-#ifdef CONFIG_ZONE_DMA32
-	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-		gfp |= __GFP_DMA32;
-#endif
-
-	/* Don't invoke OOM killer */
-	gfp |= __GFP_NORETRY;
-
-	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-}
-
-static void nlm_dma_free_coherent(struct device *dev, size_t size,
-	void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
-{
-	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-const struct dma_map_ops nlm_swiotlb_dma_ops = {
-	.alloc = nlm_dma_alloc_coherent,
-	.free = nlm_dma_free_coherent,
-	.map_page = swiotlb_map_page,
-	.unmap_page = swiotlb_unmap_page,
-	.map_sg = swiotlb_map_sg_attrs,
-	.unmap_sg = swiotlb_unmap_sg_attrs,
-	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-	.sync_single_for_device = swiotlb_sync_single_for_device,
-	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.mapping_error = swiotlb_dma_mapping_error,
-	.dma_supported = swiotlb_dma_supported
-};
-
-void __init plat_swiotlb_setup(void)
-{
-	size_t swiotlbsize;
-	unsigned long swiotlb_nslabs;
-
-	swiotlbsize = 1 << 20; /* 1 MB for now */
-	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
-	swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
-	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
-
-	nlm_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
-	swiotlb_init_with_tbl(nlm_swiotlb, swiotlb_nslabs, 1);
-}
@@ -37,9 +37,6 @@ static void *mn10300_dma_alloc(struct device *dev, size_t size,
        goto done;
    }

    /* ignore region specifiers */
    gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

    if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
        gfp |= GFP_DMA;

@@ -63,9 +63,6 @@ static void *nios2_dma_alloc(struct device *dev, size_t size,
{
    void *ret;

    /* ignore region specifiers */
    gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

    /* optimized page clearing */
    gfp |= __GFP_ZERO;

@@ -75,11 +75,6 @@ void dump_resmap(void)
static inline void dump_resmap(void) {;}
#endif

static int pa11_dma_supported( struct device *dev, u64 mask)
{
    return 1;
}

static inline int map_pte_uncached(pte_t * pte,
        unsigned long vaddr,
        unsigned long size, unsigned long *paddr_ptr)

@@ -579,7 +574,6 @@ static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
}

const struct dma_map_ops pcxl_dma_ops = {
    .dma_supported = pa11_dma_supported,
    .alloc = pa11_dma_alloc,
    .free = pa11_dma_free,
    .map_page = pa11_dma_map_page,

@@ -616,7 +610,6 @@ static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
}

const struct dma_map_ops pcx_dma_ops = {
    .dma_supported = pa11_dma_supported,
    .alloc = pcx_dma_alloc,
    .free = pcx_dma_free,
    .map_page = pa11_dma_map_page,
@@ -139,6 +139,7 @@ config PPC
    select ARCH_HAS_ELF_RANDOMIZE
    select ARCH_HAS_FORTIFY_SOURCE
    select ARCH_HAS_GCOV_PROFILE_ALL
    select ARCH_HAS_PHYS_TO_DMA
    select ARCH_HAS_PMEM_API if PPC64
    select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
    select ARCH_HAS_SG_CHAIN

@@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_POWERPC_DMA_DIRECT_H
#define ASM_POWERPC_DMA_DIRECT_H 1

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
    struct dev_archdata *sd = &dev->archdata;

    if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
        return false;
#endif

    if (!dev->dma_mask)
        return false;

    return addr + size - 1 <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
    return paddr + get_dma_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
    return daddr - get_dma_offset(dev);
}
#endif /* ASM_POWERPC_DMA_DIRECT_H */
@@ -19,13 +19,13 @@
#include <asm/swiotlb.h>

/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
extern void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t flag,
        unsigned long attrs);
extern void __dma_direct_free_coherent(struct device *dev, size_t size,
extern void __dma_nommu_free_coherent(struct device *dev, size_t size,
        void *vaddr, dma_addr_t dma_handle,
        unsigned long attrs);
extern int dma_direct_mmap_coherent(struct device *dev,
extern int dma_nommu_mmap_coherent(struct device *dev,
        struct vm_area_struct *vma,
        void *cpu_addr, dma_addr_t handle,
        size_t size, unsigned long attrs);

@@ -73,7 +73,7 @@ static inline unsigned long device_to_mask(struct device *dev)
#ifdef CONFIG_PPC64
extern struct dma_map_ops dma_iommu_ops;
#endif
extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_nommu_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{

@@ -107,39 +107,11 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
    dev->archdata.dma_offset = off;
}

/* this will be removed soon */
#define flush_write_buffers()

#define HAVE_ARCH_DMA_SET_MASK 1
extern int dma_set_mask(struct device *dev, u64 dma_mask);

extern u64 __dma_get_required_mask(struct device *dev);

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
#ifdef CONFIG_SWIOTLB
    struct dev_archdata *sd = &dev->archdata;

    if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
        return false;
#endif

    if (!dev->dma_mask)
        return false;

    return addr + size - 1 <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
    return paddr + get_dma_offset(dev);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
    return daddr - get_dma_offset(dev);
}

#define ARCH_HAS_DMA_MMAP_COHERENT

#endif /* __KERNEL__ */
@@ -13,9 +13,7 @@

#include <linux/swiotlb.h>

extern const struct dma_map_ops swiotlb_dma_ops;

static inline void dma_mark_clean(void *addr, size_t size) {}
extern const struct dma_map_ops powerpc_swiotlb_dma_ops;

extern unsigned int ppc_swiotlb_enable;
int __init swiotlb_setup_bus_notifier(void);

@@ -114,7 +114,7 @@ int dma_iommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
struct dma_map_ops dma_iommu_ops = {
    .alloc = dma_iommu_alloc_coherent,
    .free = dma_iommu_free_coherent,
    .mmap = dma_direct_mmap_coherent,
    .mmap = dma_nommu_mmap_coherent,
    .map_sg = dma_iommu_map_sg,
    .unmap_sg = dma_iommu_unmap_sg,
    .dma_supported = dma_iommu_dma_supported,

@@ -46,10 +46,10 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
 * map_page, and unmap_page on highmem, use normal dma_ops
 * for everything else.
 */
const struct dma_map_ops swiotlb_dma_ops = {
    .alloc = __dma_direct_alloc_coherent,
    .free = __dma_direct_free_coherent,
    .mmap = dma_direct_mmap_coherent,
const struct dma_map_ops powerpc_swiotlb_dma_ops = {
    .alloc = __dma_nommu_alloc_coherent,
    .free = __dma_nommu_free_coherent,
    .mmap = dma_nommu_mmap_coherent,
    .map_sg = swiotlb_map_sg_attrs,
    .unmap_sg = swiotlb_unmap_sg_attrs,
    .dma_supported = swiotlb_dma_supported,

@@ -89,7 +89,7 @@ static int ppc_swiotlb_bus_notify(struct notifier_block *nb,

    /* May need to bounce if the device can't address all of DRAM */
    if ((dma_get_mask(dev) + 1) < memblock_end_of_DRAM())
        set_dma_ops(dev, &swiotlb_dma_ops);
        set_dma_ops(dev, &powerpc_swiotlb_dma_ops);

    return NOTIFY_DONE;
}

@@ -121,7 +121,7 @@ static int __init check_swiotlb_enabled(void)
    if (ppc_swiotlb_enable)
        swiotlb_print_info();
    else
        swiotlb_free();
        swiotlb_exit();

    return 0;
}
@@ -33,14 +33,14 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev)
    struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
    if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops)
    if (sd->max_direct_dma_addr && dev->dma_ops == &powerpc_swiotlb_dma_ops)
        pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

    return pfn;
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
static int dma_nommu_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
    u64 limit = get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);

@@ -62,7 +62,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask)
#endif
}

void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t flag,
        unsigned long attrs)
{

@@ -105,9 +105,6 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
    };
#endif /* CONFIG_FSL_SOC */

    /* ignore region specifiers */
    flag &= ~(__GFP_HIGHMEM);

    page = alloc_pages_node(node, flag, get_order(size));
    if (page == NULL)
        return NULL;

@@ -119,7 +116,7 @@ void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
#endif
}

void __dma_direct_free_coherent(struct device *dev, size_t size,
void __dma_nommu_free_coherent(struct device *dev, size_t size,
        void *vaddr, dma_addr_t dma_handle,
        unsigned long attrs)
{

@@ -130,7 +127,7 @@ void __dma_direct_free_coherent(struct device *dev, size_t size,
#endif
}

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t flag,
        unsigned long attrs)
{

@@ -139,8 +136,8 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
    /* The coherent mask may be smaller than the real mask, check if
     * we can really use the direct ops
     */
    if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
        return __dma_direct_alloc_coherent(dev, size, dma_handle,
    if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
        return __dma_nommu_alloc_coherent(dev, size, dma_handle,
                flag, attrs);

    /* Ok we can't ... do we have an iommu ? If not, fail */

@@ -154,15 +151,15 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
            dev_to_node(dev));
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
static void dma_nommu_free_coherent(struct device *dev, size_t size,
        void *vaddr, dma_addr_t dma_handle,
        unsigned long attrs)
{
    struct iommu_table *iommu;

    /* See comments in dma_direct_alloc_coherent() */
    if (dma_direct_dma_supported(dev, dev->coherent_dma_mask))
        return __dma_direct_free_coherent(dev, size, vaddr, dma_handle,
    /* See comments in dma_nommu_alloc_coherent() */
    if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
        return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle,
                attrs);
    /* Maybe we used an iommu ... */
    iommu = get_iommu_table_base(dev);

@@ -175,7 +172,7 @@ static void dma_direct_free_coherent(struct device *dev, size_t size,
    iommu_free_coherent(iommu, size, vaddr, dma_handle);
}

int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
        void *cpu_addr, dma_addr_t handle, size_t size,
        unsigned long attrs)
{

@@ -193,7 +190,7 @@ int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
            vma->vm_page_prot);
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
static int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
        int nents, enum dma_data_direction direction,
        unsigned long attrs)
{

@@ -213,13 +210,13 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
    return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
static void dma_nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
        int nents, enum dma_data_direction direction,
        unsigned long attrs)
{
}

static u64 dma_direct_get_required_mask(struct device *dev)
static u64 dma_nommu_get_required_mask(struct device *dev)
{
    u64 end, mask;

@@ -231,7 +228,7 @@ static u64 dma_direct_get_required_mask(struct device *dev)
    return mask;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
static inline dma_addr_t dma_nommu_map_page(struct device *dev,
        struct page *page,
        unsigned long offset,
        size_t size,

@@ -246,7 +243,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
    return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
static inline void dma_nommu_unmap_page(struct device *dev,
        dma_addr_t dma_address,
        size_t size,
        enum dma_data_direction direction,

@@ -255,7 +252,7 @@ static inline void dma_direct_unmap_page(struct device *dev,
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
static inline void dma_nommu_sync_sg(struct device *dev,
        struct scatterlist *sgl, int nents,
        enum dma_data_direction direction)
{

@@ -266,7 +263,7 @@ static inline void dma_direct_sync_sg(struct device *dev,
    __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
static inline void dma_nommu_sync_single(struct device *dev,
        dma_addr_t dma_handle, size_t size,
        enum dma_data_direction direction)
{

@@ -274,24 +271,24 @@ static inline void dma_direct_sync_single(struct device *dev,
}
#endif

const struct dma_map_ops dma_direct_ops = {
    .alloc = dma_direct_alloc_coherent,
    .free = dma_direct_free_coherent,
    .mmap = dma_direct_mmap_coherent,
    .map_sg = dma_direct_map_sg,
    .unmap_sg = dma_direct_unmap_sg,
    .dma_supported = dma_direct_dma_supported,
    .map_page = dma_direct_map_page,
    .unmap_page = dma_direct_unmap_page,
    .get_required_mask = dma_direct_get_required_mask,
const struct dma_map_ops dma_nommu_ops = {
    .alloc = dma_nommu_alloc_coherent,
    .free = dma_nommu_free_coherent,
    .mmap = dma_nommu_mmap_coherent,
    .map_sg = dma_nommu_map_sg,
    .unmap_sg = dma_nommu_unmap_sg,
    .dma_supported = dma_nommu_dma_supported,
    .map_page = dma_nommu_map_page,
    .unmap_page = dma_nommu_unmap_page,
    .get_required_mask = dma_nommu_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
    .sync_single_for_cpu = dma_direct_sync_single,
    .sync_single_for_device = dma_direct_sync_single,
    .sync_sg_for_cpu = dma_direct_sync_sg,
    .sync_sg_for_device = dma_direct_sync_sg,
    .sync_single_for_cpu = dma_nommu_sync_single,
    .sync_single_for_device = dma_nommu_sync_single,
    .sync_sg_for_cpu = dma_nommu_sync_sg,
    .sync_sg_for_device = dma_nommu_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
EXPORT_SYMBOL(dma_nommu_ops);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{

@@ -302,7 +299,7 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
     * is no dma_op->set_coherent_mask() so we have to do
     * things the hard way:
     */
    if (get_dma_ops(dev) != &dma_direct_ops ||
    if (get_dma_ops(dev) != &dma_nommu_ops ||
        get_iommu_table_base(dev) == NULL ||
        !dma_iommu_dma_supported(dev, mask))
        return -EIO;
@@ -60,7 +60,7 @@ resource_size_t isa_mem_base;
EXPORT_SYMBOL(isa_mem_base);


static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
static const struct dma_map_ops *pci_dma_ops = &dma_nommu_ops;

void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{

@@ -780,7 +780,7 @@ void arch_setup_pdev_archdata(struct platform_device *pdev)
{
    pdev->archdata.dma_mask = DMA_BIT_MASK(32);
    pdev->dev.dma_mask = &pdev->archdata.dma_mask;
    set_dma_ops(&pdev->dev, &dma_direct_ops);
    set_dma_ops(&pdev->dev, &dma_nommu_ops);
}

static __init void print_system_info(void)
@@ -541,7 +541,7 @@ static struct cbe_iommu *cell_iommu_for_node(int nid)
    return NULL;
}

static unsigned long cell_dma_direct_offset;
static unsigned long cell_dma_nommu_offset;

static unsigned long dma_iommu_fixed_base;

@@ -580,7 +580,7 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
                device_to_mask(dev), flag,
                dev_to_node(dev));
    else
        return dma_direct_ops.alloc(dev, size, dma_handle, flag,
        return dma_nommu_ops.alloc(dev, size, dma_handle, flag,
                attrs);
}

@@ -592,7 +592,7 @@ static void dma_fixed_free_coherent(struct device *dev, size_t size,
        iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
                dma_handle);
    else
        dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
        dma_nommu_ops.free(dev, size, vaddr, dma_handle, attrs);
}

static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,

@@ -601,7 +601,7 @@ static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
        unsigned long attrs)
{
    if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
        return dma_direct_ops.map_page(dev, page, offset, size,
        return dma_nommu_ops.map_page(dev, page, offset, size,
                direction, attrs);
    else
        return iommu_map_page(dev, cell_get_iommu_table(dev), page,

@@ -614,7 +614,7 @@ static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
        unsigned long attrs)
{
    if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
        dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
        dma_nommu_ops.unmap_page(dev, dma_addr, size, direction,
                attrs);
    else
        iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,

@@ -626,7 +626,7 @@ static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
        unsigned long attrs)
{
    if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
        return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
        return dma_nommu_ops.map_sg(dev, sg, nents, direction, attrs);
    else
        return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
                nents, device_to_mask(dev),

@@ -638,7 +638,7 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
        unsigned long attrs)
{
    if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
        dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
        dma_nommu_ops.unmap_sg(dev, sg, nents, direction, attrs);
    else
        ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
                direction, attrs);

@@ -661,8 +661,8 @@ static void cell_dma_dev_setup(struct device *dev)
{
    if (get_pci_dma_ops() == &dma_iommu_ops)
        set_iommu_table_base(dev, cell_get_iommu_table(dev));
    else if (get_pci_dma_ops() == &dma_direct_ops)
        set_dma_offset(dev, cell_dma_direct_offset);
    else if (get_pci_dma_ops() == &dma_nommu_ops)
        set_dma_offset(dev, cell_dma_nommu_offset);
    else
        BUG();
}

@@ -810,14 +810,14 @@ static int __init cell_iommu_init_disabled(void)
    unsigned long base = 0, size;

    /* When no iommu is present, we use direct DMA ops */
    set_pci_dma_ops(&dma_direct_ops);
    set_pci_dma_ops(&dma_nommu_ops);

    /* First make sure all IOC translation is turned off */
    cell_disable_iommus();

    /* If we have no Axon, we set up the spider DMA magic offset */
    if (of_find_node_by_name(NULL, "axon") == NULL)
        cell_dma_direct_offset = SPIDER_DMA_OFFSET;
        cell_dma_nommu_offset = SPIDER_DMA_OFFSET;

    /* Now we need to check to see where the memory is mapped
     * in PCI space. We assume that all busses use the same dma

@@ -851,13 +851,13 @@ static int __init cell_iommu_init_disabled(void)
        return -ENODEV;
    }

    cell_dma_direct_offset += base;
    cell_dma_nommu_offset += base;

    if (cell_dma_direct_offset != 0)
    if (cell_dma_nommu_offset != 0)
        cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;

    printk("iommu: disabled, direct DMA offset is 0x%lx\n",
            cell_dma_direct_offset);
            cell_dma_nommu_offset);

    return 0;
}
@@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
     */
    if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
        !firmware_has_feature(FW_FEATURE_LPAR)) {
        dev->dev.dma_ops = &dma_direct_ops;
        dev->dev.dma_ops = &dma_nommu_ops;
        /*
         * Set the coherent DMA mask to prevent the iommu
         * being used unnecessarily

@@ -363,7 +363,7 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
        return 0;

    /* We use the direct ops for localbus */
    dev->dma_ops = &dma_direct_ops;
    dev->dma_ops = &dma_nommu_ops;

    return 0;
}
@@ -1850,7 +1850,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)

    if (bypass) {
        dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
        set_dma_ops(&pdev->dev, &dma_direct_ops);
        set_dma_ops(&pdev->dev, &dma_nommu_ops);
    } else {
        /*
         * If the device can't set the TCE bypass bit but still wants

@@ -1868,7 +1868,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
            return rc;
        /* 4GB offset bypasses 32-bit space */
        set_dma_offset(&pdev->dev, (1ULL << 32));
        set_dma_ops(&pdev->dev, &dma_direct_ops);
        set_dma_ops(&pdev->dev, &dma_nommu_ops);
    } else if (dma_mask >> 32 && dma_mask != DMA_BIT_MASK(64)) {
        /*
         * Fail the request if a DMA mask between 32 and 64 bits

@@ -1231,7 +1231,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
        if (dma_offset != 0) {
            dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset);
            set_dma_offset(dev, dma_offset);
            set_dma_ops(dev, &dma_direct_ops);
            set_dma_ops(dev, &dma_nommu_ops);
            ddw_enabled = true;
        }
    }

@@ -618,7 +618,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
static const struct dma_map_ops vio_dma_mapping_ops = {
    .alloc = vio_dma_iommu_alloc_coherent,
    .free = vio_dma_iommu_free_coherent,
    .mmap = dma_direct_mmap_coherent,
    .mmap = dma_nommu_mmap_coherent,
    .map_sg = vio_dma_iommu_map_sg,
    .unmap_sg = vio_dma_iommu_unmap_sg,
    .map_page = vio_dma_iommu_map_page,
@@ -402,7 +402,7 @@ static int dart_dma_set_mask(struct device *dev, u64 dma_mask)
     */
    if (dart_device_on_pcie(dev) && dma_mask >= DMA_BIT_MASK(40)) {
        dev_info(dev, "Using 64-bit DMA iommu bypass\n");
        set_dma_ops(dev, &dma_direct_ops);
        set_dma_ops(dev, &dma_nommu_ops);
    } else {
        dev_info(dev, "Using 32-bit DMA via iommu\n");
        set_dma_ops(dev, &dma_iommu_ops);

@@ -446,7 +446,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
    controller_ops->dma_bus_setup = NULL;

    /* Setup pci_dma ops */
    set_pci_dma_ops(&dma_direct_ops);
    set_pci_dma_ops(&dma_nommu_ops);
}

#ifdef CONFIG_PM

@@ -118,7 +118,7 @@ static void setup_swiotlb_ops(struct pci_controller *hose)
{
    if (ppc_swiotlb_enable) {
        hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
        set_pci_dma_ops(&swiotlb_dma_ops);
        set_pci_dma_ops(&powerpc_swiotlb_dma_ops);
    }
}
#else

@@ -135,7 +135,7 @@ static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
     * mapping that allows addressing any RAM address from across PCI.
     */
    if (dev_is_pci(dev) && dma_mask >= pci64_dma_offset * 2 - 1) {
        set_dma_ops(dev, &dma_direct_ops);
        set_dma_ops(dev, &dma_nommu_ops);
        set_dma_offset(dev, pci64_dma_offset);
    }

@@ -83,7 +83,7 @@ config PGTABLE_LEVELS
config HAVE_KPROBES
    def_bool n

config DMA_NOOP_OPS
config DMA_DIRECT_OPS
    def_bool y

menu "Platform type"

@@ -7,6 +7,7 @@ generic-y += device.h
generic-y += div64.h
generic-y += dma.h
generic-y += dma-contiguous.h
generic-y += dma-mapping.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h

@@ -1,38 +0,0 @@
/*
 * Copyright (C) 2003-2004 Hewlett-Packard Co
 *   David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2016 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_RISCV_DMA_MAPPING_H
#define __ASM_RISCV_DMA_MAPPING_H

/* Use ops->dma_mapping_error (if it exists) or assume success */
// #undef DMA_ERROR_CODE

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
    return &dma_noop_ops;
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
    if (!dev->dma_mask)
        return false;

    return addr + size - 1 <= *dev->dma_mask;
}

#endif /* __ASM_RISCV_DMA_MAPPING_H */
@@ -140,7 +140,7 @@ config S390
    select HAVE_DEBUG_KMEMLEAK
    select HAVE_DMA_API_DEBUG
    select HAVE_DMA_CONTIGUOUS
    select DMA_NOOP_OPS
    select DMA_DIRECT_OPS
    select HAVE_DYNAMIC_FTRACE
    select HAVE_DYNAMIC_FTRACE_WITH_REGS
    select HAVE_EFFICIENT_UNALIGNED_ACCESS

@@ -4,6 +4,7 @@ generic-y += cacheflush.h
generic-y += clkdev.h
generic-y += device.h
generic-y += dma-contiguous.h
generic-y += dma-mapping.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += export.h

@@ -1,26 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_DMA_MAPPING_H
#define _ASM_S390_DMA_MAPPING_H

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/io.h>

extern const struct dma_map_ops s390_pci_dma_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
    return &dma_noop_ops;
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
    if (!dev->dma_mask)
        return false;
    return addr + size - 1 <= *dev->dma_mask;
}

#endif /* _ASM_S390_DMA_MAPPING_H */

@@ -201,4 +201,7 @@ void dma_cleanup_tables(unsigned long *);
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);

extern const struct dma_map_ops s390_pci_dma_ops;


#endif
@@ -249,7 +249,7 @@ config HIGHMEM

      If unsure, say "true".

config ZONE_DMA
config ZONE_DMA32
    def_bool y

config IOMMU_HELPER

@@ -261,6 +261,7 @@ config NEED_SG_DMA_LENGTH
config SWIOTLB
    bool
    default TILEGX
    select DMA_DIRECT_OPS
    select IOMMU_HELPER
    select NEED_SG_DMA_LENGTH
    select ARCH_HAS_DMA_SET_COHERENT_MASK

@@ -44,26 +44,6 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
    dev->archdata.dma_offset = off;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
    return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
    return daddr;
}

static inline void dma_mark_clean(void *addr, size_t size) {}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
    if (!dev->dma_mask)
        return 0;

    return addr + size - 1 <= *dev->dma_mask;
}

#define HAVE_ARCH_DMA_SET_MASK 1
int dma_set_mask(struct device *dev, u64 mask);

@@ -54,7 +54,7 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
     * which case we will return NULL. But such devices are uncommon.
     */
    if (dma_mask <= DMA_BIT_MASK(32)) {
        gfp |= GFP_DMA;
        gfp |= GFP_DMA32;
        node = 0;
    }

@@ -509,39 +509,9 @@ EXPORT_SYMBOL(gx_pci_dma_map_ops);
/* PCI DMA mapping functions for legacy PCI devices */

#ifdef CONFIG_SWIOTLB
static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp,
        unsigned long attrs)
{
    gfp |= GFP_DMA;
    return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}

static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
        void *vaddr, dma_addr_t dma_addr,
        unsigned long attrs)
{
    swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}

static const struct dma_map_ops pci_swiotlb_dma_ops = {
    .alloc = tile_swiotlb_alloc_coherent,
    .free = tile_swiotlb_free_coherent,
    .map_page = swiotlb_map_page,
    .unmap_page = swiotlb_unmap_page,
    .map_sg = swiotlb_map_sg_attrs,
    .unmap_sg = swiotlb_unmap_sg_attrs,
    .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
    .sync_single_for_device = swiotlb_sync_single_for_device,
    .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
    .sync_sg_for_device = swiotlb_sync_sg_for_device,
    .dma_supported = swiotlb_dma_supported,
    .mapping_error = swiotlb_dma_mapping_error,
};

static const struct dma_map_ops pci_hybrid_dma_ops = {
    .alloc = tile_swiotlb_alloc_coherent,
    .free = tile_swiotlb_free_coherent,
    .alloc = swiotlb_alloc,
    .free = swiotlb_free,
    .map_page = tile_pci_dma_map_page,
    .unmap_page = tile_pci_dma_unmap_page,
    .map_sg = tile_pci_dma_map_sg,

@@ -552,7 +522,7 @@ static const struct dma_map_ops pci_hybrid_dma_ops = {
    .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
};

const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &swiotlb_dma_ops;
const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
#else
const struct dma_map_ops *gx_legacy_pci_dma_map_ops;

@@ -814,11 +814,11 @@ static void __init zone_sizes_init(void)
#endif

    if (start < dma_end) {
        zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
        zones_size[ZONE_DMA32] = min(zones_size[ZONE_NORMAL],
                dma_end - start);
        zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
        zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA32];
    } else {
        zones_size[ZONE_DMA] = 0;
        zones_size[ZONE_DMA32] = 0;
    }

    /* Take zone metadata from controller 0 if we're isolnode. */

@@ -830,7 +830,7 @@ static void __init zone_sizes_init(void)
            PFN_UP(node_percpu[i]));

    /* Track the type of memory on each node */
    if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
    if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA32])
        node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
    if (end != start)
@@ -12,38 +12,11 @@
#ifndef __UNICORE_DMA_MAPPING_H__
#define __UNICORE_DMA_MAPPING_H__

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

extern const struct dma_map_ops swiotlb_dma_map_ops;

static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
    return &swiotlb_dma_map_ops;
    return &swiotlb_dma_ops;
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
    if (dev && dev->dma_mask)
        return addr + size - 1 <= *dev->dma_mask;

    return 1;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
    return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
    return daddr;
}

static inline void dma_mark_clean(void *addr, size_t size) {}

#endif /* __KERNEL__ */
#endif

@@ -42,6 +42,7 @@ config CPU_TLB_SINGLE_ENTRY_DISABLE

config SWIOTLB
    def_bool y
    select DMA_DIRECT_OPS

config IOMMU_HELPER
    def_bool SWIOTLB

@@ -6,8 +6,6 @@
obj-y := extable.o fault.o init.o pgd.o mmu.o
obj-y += flush.o ioremap.o

obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o

obj-$(CONFIG_MODULES) += proc-syms.o

obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
@@ -1,48 +0,0 @@
/*
 * Contains routines needed to support swiotlb for UniCore32.
 *
 * Copyright (C) 2010 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/pci.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/bootmem.h>

#include <asm/dma.h>

static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t flags,
        unsigned long attrs)
{
    return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}

static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
        void *vaddr, dma_addr_t dma_addr,
        unsigned long attrs)
{
    swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}

const struct dma_map_ops swiotlb_dma_map_ops = {
    .alloc = unicore_swiotlb_alloc_coherent,
    .free = unicore_swiotlb_free_coherent,
    .map_sg = swiotlb_map_sg_attrs,
    .unmap_sg = swiotlb_unmap_sg_attrs,
    .dma_supported = swiotlb_dma_supported,
    .map_page = swiotlb_map_page,
    .unmap_page = swiotlb_unmap_page,
    .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
    .sync_single_for_device = swiotlb_sync_single_for_device,
    .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
    .sync_sg_for_device = swiotlb_sync_sg_for_device,
    .mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(swiotlb_dma_map_ops);
@@ -54,6 +54,7 @@ config X86
    select ARCH_HAS_FORTIFY_SOURCE
    select ARCH_HAS_GCOV_PROFILE_ALL
    select ARCH_HAS_KCOV if X86_64
    select ARCH_HAS_PHYS_TO_DMA
    select ARCH_HAS_PMEM_API if X86_64
    select ARCH_HAS_REFCOUNT
    select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64

@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_X86_DMA_DIRECT_H
#define ASM_X86_DMA_DIRECT_H 1

#include <linux/mem_encrypt.h>

#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
#else
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
    if (!dev->dma_mask)
        return 0;

    return addr + size - 1 <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
    return __sme_set(paddr);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
    return __sme_clr(daddr);
}
#endif /* CONFIG_X86_DMA_REMAP */
#endif /* ASM_X86_DMA_DIRECT_H */
@@ -12,7 +12,6 @@
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <linux/dma-contiguous.h>
#include <linux/mem_encrypt.h>

#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)

@@ -31,6 +30,9 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
    return dma_ops;
}

int arch_dma_supported(struct device *dev, u64 mask);
#define arch_dma_supported arch_dma_supported

bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
#define arch_dma_alloc_attrs arch_dma_alloc_attrs

@@ -42,31 +44,6 @@ extern void dma_generic_free_coherent(struct device *dev, size_t size,
        void *vaddr, dma_addr_t dma_addr,
        unsigned long attrs);

#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
#else

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
    if (!dev->dma_mask)
        return 0;

    return addr + size - 1 <= *dev->dma_mask;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
    return __sme_set(paddr);
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
    return __sme_clr(daddr);
}
#endif /* CONFIG_X86_DMA_REMAP */

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
        gfp_t gfp)
{
@@ -28,8 +28,6 @@ static inline void pci_swiotlb_late_init(void)
}
#endif

static inline void dma_mark_clean(void *addr, size_t size) {}

extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
        dma_addr_t *dma_handle, gfp_t flags,
        unsigned long attrs);

@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/atomic.h>
#include <linux/dma-direct.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>

@@ -87,7 +87,6 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,

    dma_mask = dma_alloc_coherent_mask(dev, flag);

    flag &= ~__GFP_ZERO;
again:
    page = NULL;
    /* CMA can be used only in the context which permits sleeping */

@@ -139,7 +138,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
    if (!*dev)
        *dev = &x86_dma_fallback_dev;

    *gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
    *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);

    if (!is_device_dma_capable(*dev))

@@ -217,7 +215,7 @@ static __init int iommu_setup(char *p)
}
early_param("iommu", iommu_setup);

int x86_dma_supported(struct device *dev, u64 mask)
int arch_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
    if (mask > 0xffffffff && forbid_dac > 0) {

@@ -226,12 +224,6 @@ int x86_dma_supported(struct device *dev, u64 mask)
    }
#endif

    /* Copied from i386. Doesn't make much sense, because it will
       only work for pci_alloc_coherent.
       The caller just has to use GFP_DMA in this case. */
    if (mask < DMA_BIT_MASK(24))
        return 0;

    /* Tell the device to use SAC when IOMMU force is on.  This
       allows the driver to use cheaper accesses in some cases.

@@ -251,6 +243,17 @@ int x86_dma_supported(struct device *dev, u64 mask)

    return 1;
}
EXPORT_SYMBOL(arch_dma_supported);

int x86_dma_supported(struct device *dev, u64 mask)
{
    /* Copied from i386. Doesn't make much sense, because it will
       only work for pci_alloc_coherent.
       The caller just has to use GFP_DMA in this case. */
    if (mask < DMA_BIT_MASK(24))
        return 0;
    return 1;
}

static int __init pci_iommu_init(void)
{