dma-mapping updates for 5.9

- make support for dma_ops optional
- move more code out of line
- add generic support for a dma_ops bypass mode
- misc cleanups

-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAl8oGscLHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYNfEhAAmFwd6BBHGwAhXUchoIue5vdNnuY3GiBFRzUdz67W
zRYYgZYiPjl+MwflRmwPcoWEnGzmweRa2s6OnyDostiCRauioa8BuQfGqJasf1yZ
D36dFNVHGW0o6pRDUQkd688k/4A6szwuwpq83qi4e8X2I9QzAITHtW8izjfPM923
FlJzxEFggbB2TvwfUXOZhmpuG4Dog8S7VZ1Uz4QAg0Z/5FDqIKAAG2aZMqCXBbiX
01E8tr0AqU/jn2xpc8O+DJGFiYIRhqhyNxQbH6qz1Q3xGFSokcLYm3YqkqVOgpn1
DLs2UFDxWkly/F+wGnYtju7OD9VGPywzOcW125/LIsApYN5R/rYrtQzK41eq7Mp5
HY3tqgNTIMdnl4so7QXeU4Vxj+lUdPlI26NZGszcM5AVftdTX8KjGdS+0+PBza6i
i7trwG7J5/DnwiBCvEKoul7Ul1psUMTSvYwINTXRqsU4mZXhhx/mwyXbtruELnkj
3agM98u6hoalLNjd2aueh+NjMZi1r+MchTrfRvTcxJ+yQ5BoR5kF+iz7eT/LtZ72
AqWwimsPGNkLHUa0TrqWql5tv90cdDkBZzWXVbixwxRfgynWYLE6jugeIy8hwjFf
GjO5XKbBwnWPjdSzFsVMPeuNpmr7ZjVHHewy2Q/jWQAIOyeof0VztEl23LN5yUkx
pc8=
=90UK
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.9' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

- make support for dma_ops optional

- move more code out of line

- add generic support for a dma_ops bypass mode

- misc cleanups

* tag 'dma-mapping-5.9' of git://git.infradead.org/users/hch/dma-mapping:
  dma-contiguous: cleanup dma_alloc_contiguous
  dma-debug: use named initializers for dir2name
  powerpc: use the generic dma_ops_bypass mode
  dma-mapping: add a dma_ops_bypass flag to struct device
  dma-mapping: make support for dma ops optional
  dma-mapping: inline the fast path dma-direct calls
  dma-mapping: move the remaining DMA API calls out of line
commit 2ed90dbbf7
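Before the per-file diffs, a quick orientation: the centerpiece of this pull is that a device may now run without any dma_map_ops at all (plain direct mapping), and an IOMMU driver can keep its ops registered yet ask the core to bypass them per device. A condensed C sketch of the fast-path decision, mirroring the dma_go_direct() helper this merge adds to kernel/dma/mapping.c further down (debug hooks and the separate coherent-mask case are elided here):

/* Sketch of the fast-path decision; see the real dma_go_direct() /
 * dma_map_direct() helpers in kernel/dma/mapping.c below. */
static bool dma_go_direct_sketch(struct device *dev, u64 mask,
				 const struct dma_map_ops *ops)
{
	if (!ops)		/* no dma_ops at all: always direct map */
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	/* An IOMMU driver set dev->dma_ops_bypass from ->dma_supported:
	 * take the direct path when the mask covers all of memory. */
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			dma_direct_get_required_mask(dev);
#endif
	return false;		/* fall through to the indirect ops calls */
}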
arch/alpha/Kconfig
@@ -7,6 +7,7 @@ config ALPHA
 	select ARCH_NO_PREEMPT
 	select ARCH_NO_SG_CHAIN
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select DMA_OPS if PCI
 	select FORCE_PCI if !ALPHA_JENSEN
 	select PCI_DOMAINS if PCI
 	select PCI_SYSCALL if PCI
arch/arm/Kconfig
@@ -41,6 +41,7 @@ config ARM
 	select CPU_PM if SUSPEND || CPU_IDLE
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select DMA_DECLARE_COHERENT
+	select DMA_OPS
 	select DMA_REMAP if MMU
 	select EDAC_SUPPORT
 	select EDAC_ATOMIC_SCRUB
arch/ia64/Kconfig
@@ -192,6 +192,7 @@ config IA64_SGI_UV

 config IA64_HP_SBA_IOMMU
 	bool "HP SBA IOMMU support"
+	select DMA_OPS
 	default y
 	help
 	  Say Y here to add support for the SBA IOMMU found on HP zx1 and
arch/mips/Kconfig
@@ -366,6 +366,7 @@ config MACH_JAZZ
 	select ARC_PROMLIB
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select DMA_OPS
 	select FW_ARC
 	select FW_ARC32
 	select ARCH_MAY_HAVE_PC_FDC
arch/parisc/Kconfig
@@ -14,6 +14,7 @@ config PARISC
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARCH_NO_SG_CHAIN
 	select ARCH_SUPPORTS_MEMORY_FAILURE
+	select DMA_OPS
 	select RTC_CLASS
 	select RTC_DRV_GENERIC
 	select INIT_ALL_POSSIBLE
arch/powerpc/Kconfig
@@ -151,6 +151,8 @@ config PPC
 	select BUILDTIME_TABLE_SORT
 	select CLONE_BACKWARDS
 	select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
+	select DMA_OPS if PPC64
+	select DMA_OPS_BYPASS if PPC64
 	select DYNAMIC_FTRACE if FUNCTION_TRACER
 	select EDAC_ATOMIC_SCRUB
 	select EDAC_SUPPORT
arch/powerpc/include/asm/device.h
@@ -18,11 +18,6 @@ struct iommu_table;
  * drivers/macintosh/macio_asic.c
  */
 struct dev_archdata {
-	/*
-	 * Set to %true if the dma_iommu_ops are requested to use a direct
-	 * window instead of dynamically mapping memory.
-	 */
-	bool iommu_bypass : 1;
 	/*
 	 * These two used to be a union. However, with the hybrid ops we need
 	 * both so here we store both a DMA offset for direct mappings and
arch/powerpc/kernel/dma-iommu.c
@@ -14,23 +14,6 @@
  * Generic iommu implementation
  */

-/*
- * The coherent mask may be smaller than the real mask, check if we can
- * really use a direct window.
- */
-static inline bool dma_iommu_alloc_bypass(struct device *dev)
-{
-	return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
-		dma_direct_supported(dev, dev->coherent_dma_mask);
-}
-
-static inline bool dma_iommu_map_bypass(struct device *dev,
-		unsigned long attrs)
-{
-	return dev->archdata.iommu_bypass &&
-		(!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
-}
-
 /* Allocates a contiguous real buffer and creates mappings over it.
  * Returns the virtual address of the buffer and sets dma_handle
  * to the dma address (mapping) of the first page.
@@ -39,8 +22,6 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t flag,
 		unsigned long attrs)
 {
-	if (dma_iommu_alloc_bypass(dev))
-		return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
 	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
 			dma_handle, dev->coherent_dma_mask, flag,
 			dev_to_node(dev));
@@ -50,11 +31,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
 		void *vaddr, dma_addr_t dma_handle,
 		unsigned long attrs)
 {
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_free(dev, size, vaddr, dma_handle, attrs);
-	else
-		iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
-				dma_handle);
+	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }

 /* Creates TCEs for a user provided buffer. The user buffer must be
@@ -67,9 +44,6 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
 		enum dma_data_direction direction,
 		unsigned long attrs)
 {
-	if (dma_iommu_map_bypass(dev, attrs))
-		return dma_direct_map_page(dev, page, offset, size, direction,
-				attrs);
 	return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
 			size, dma_get_mask(dev), direction, attrs);
 }
@@ -79,11 +53,8 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction direction,
 		unsigned long attrs)
 {
-	if (!dma_iommu_map_bypass(dev, attrs))
-		iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
-				direction, attrs);
-	else
-		dma_direct_unmap_page(dev, dma_handle, size, direction, attrs);
+	iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
+			attrs);
 }


@@ -91,8 +62,6 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction,
 		unsigned long attrs)
 {
-	if (dma_iommu_map_bypass(dev, attrs))
-		return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
 	return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
 			dma_get_mask(dev), direction, attrs);
 }
@@ -101,11 +70,8 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction,
 		unsigned long attrs)
 {
-	if (!dma_iommu_map_bypass(dev, attrs))
-		ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
-				direction, attrs);
-	else
-		dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs);
+	ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
+			direction, attrs);
 }

 static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
@@ -113,8 +79,9 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct pci_controller *phb = pci_bus_to_host(pdev->bus);

-	return phb->controller_ops.iommu_bypass_supported &&
-		phb->controller_ops.iommu_bypass_supported(pdev, mask);
+	if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
+		return false;
+	return phb->controller_ops.iommu_bypass_supported(pdev, mask);
 }

 /* We support DMA to/from any memory page via the iommu */
@@ -123,7 +90,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
 	struct iommu_table *tbl = get_iommu_table_base(dev);

 	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
-		dev->archdata.iommu_bypass = true;
+		dev->dma_ops_bypass = true;
 		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
 		return 1;
 	}
@@ -141,7 +108,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
 	}

 	dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
-	dev->archdata.iommu_bypass = false;
+	dev->dma_ops_bypass = false;
 	return 1;
 }

@@ -153,47 +120,12 @@ u64 dma_iommu_get_required_mask(struct device *dev)
 	if (!tbl)
 		return 0;

-	if (dev_is_pci(dev)) {
-		u64 bypass_mask = dma_direct_get_required_mask(dev);
-
-		if (dma_iommu_bypass_supported(dev, bypass_mask))
-			return bypass_mask;
-	}
-
 	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) - 1);
 	mask += mask - 1;

 	return mask;
 }

-static void dma_iommu_sync_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_iommu_sync_for_device(struct device *dev, dma_addr_t addr,
-		size_t sz, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_single_for_device(dev, addr, sz, dir);
-}
-
-extern void dma_iommu_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-
-extern void dma_iommu_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-	if (dma_iommu_alloc_bypass(dev))
-		dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
-}
-
 const struct dma_map_ops dma_iommu_ops = {
 	.alloc			= dma_iommu_alloc_coherent,
 	.free			= dma_iommu_free_coherent,
@@ -203,10 +135,6 @@ const struct dma_map_ops dma_iommu_ops = {
 	.map_page		= dma_iommu_map_page,
 	.unmap_page		= dma_iommu_unmap_page,
 	.get_required_mask	= dma_iommu_get_required_mask,
-	.sync_single_for_cpu	= dma_iommu_sync_for_cpu,
-	.sync_single_for_device	= dma_iommu_sync_for_device,
-	.sync_sg_for_cpu	= dma_iommu_sync_sg_for_cpu,
-	.sync_sg_for_device	= dma_iommu_sync_sg_for_device,
 	.mmap			= dma_common_mmap,
 	.get_sgtable		= dma_common_get_sgtable,
 };
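The powerpc conversion above is the template for any IOMMU driver that wants the generic bypass: drop the per-call bypass branches and flip the flag once from ->dma_supported(). A hedged sketch for a hypothetical driver; the foo_iommu_* names are illustrative, not a real kernel API:

/* Hypothetical IOMMU driver adopting DMA_OPS_BYPASS; foo_iommu_*
 * names are placeholders for this sketch only. */
static int foo_iommu_dma_supported(struct device *dev, u64 mask)
{
	if (foo_iommu_bypass_supported(dev, mask)) {
		/* The core will now skip our map/unmap/sync ops entirely
		 * whenever the mask covers all addressable memory. */
		dev->dma_ops_bypass = true;
		return 1;
	}
	dev->dma_ops_bypass = false;
	return foo_iommu_translation_possible(dev, mask);
}

The driver's Kconfig entry would also select DMA_OPS_BYPASS so the flag exists in struct device (see kernel/dma/Kconfig below).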
arch/s390/Kconfig
@@ -112,6 +112,7 @@ config S390
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_TABLE_SORT
 	select CLONE_BACKWARDS2
+	select DMA_OPS if PCI
 	select DYNAMIC_FTRACE if FUNCTION_TRACER
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CPU_AUTOPROBE
arch/sparc/Kconfig
@@ -15,6 +15,7 @@ config SPARC
 	default y
 	select ARCH_MIGHT_HAVE_PC_PARPORT if SPARC64 && PCI
 	select ARCH_MIGHT_HAVE_PC_SERIO
+	select DMA_OPS
 	select OF
 	select OF_PROMTREE
 	select HAVE_ASM_MODVERSIONS
arch/x86/Kconfig
@@ -909,6 +909,7 @@ config DMI

 config GART_IOMMU
 	bool "Old AMD GART IOMMU support"
+	select DMA_OPS
 	select IOMMU_HELPER
 	select SWIOTLB
 	depends on X86_64 && PCI && AMD_NB
drivers/infiniband/core/device.c
@@ -1183,6 +1183,8 @@ static void setup_dma_device(struct ib_device *device)
 	struct device *parent = device->dev.parent;

 	WARN_ON_ONCE(device->dma_device);
+
+#ifdef CONFIG_DMA_OPS
 	if (device->dev.dma_ops) {
 		/*
 		 * The caller provided custom DMA operations. Copy the
@@ -1203,7 +1205,9 @@ static void setup_dma_device(struct ib_device *device)
 			else
 				WARN_ON_ONCE(true);
 		}
-	} else {
+	} else
+#endif /* CONFIG_DMA_OPS */
+	{
 		/*
 		 * The caller did not provide custom DMA operations. Use the
 		 * DMA mapping operations of the parent device.
drivers/iommu/Kconfig
@@ -97,6 +97,7 @@ config OF_IOMMU
 # IOMMU-agnostic DMA-mapping layer
 config IOMMU_DMA
 	bool
+	select DMA_OPS
 	select IOMMU_API
 	select IOMMU_IOVA
 	select IRQ_MSI_IOMMU
@@ -183,6 +184,7 @@ config DMAR_TABLE
 config INTEL_IOMMU
 	bool "Support for Intel IOMMU using DMA Remapping Devices"
 	depends on PCI_MSI && ACPI && (X86 || IA64)
+	select DMA_OPS
 	select IOMMU_API
 	select IOMMU_IOVA
 	select NEED_DMA_MAP_STATE
drivers/macintosh/macio_asic.c
@@ -382,7 +382,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
 	dma_set_max_seg_size(&dev->ofdev.dev, 65536);
 	dma_set_seg_boundary(&dev->ofdev.dev, 0xffffffff);

-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && defined(CONFIG_DMA_OPS)
 	/* Set the DMA ops to the ones from the PCI device, this could be
 	 * fishy if we didn't know that on PowerMac it's always direct ops
 	 * or iommu ops that will work fine
@@ -391,7 +391,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
 	 */
 	dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
 	dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_PCI && CONFIG_DMA_OPS */

 #ifdef DEBUG
 	printk("preparing mdev @%p, ofdev @%p, dev @%p, kobj @%p\n",
drivers/misc/mic/Kconfig
@@ -4,6 +4,7 @@ menu "Intel MIC & related support"
 config INTEL_MIC_BUS
 	tristate "Intel MIC Bus Driver"
 	depends on 64BIT && PCI && X86
+	select DMA_OPS
 	help
 	  This option is selected by any driver which registers a
 	  device or driver on the MIC Bus, such as CONFIG_INTEL_MIC_HOST,
@@ -19,6 +20,7 @@ config INTEL_MIC_BUS
 config SCIF_BUS
 	tristate "SCIF Bus Driver"
 	depends on 64BIT && PCI && X86
+	select DMA_OPS
 	help
 	  This option is selected by any driver which registers a
 	  device or driver on the SCIF Bus, such as CONFIG_INTEL_MIC_HOST
@@ -33,6 +35,7 @@ config SCIF_BUS

 config VOP_BUS
 	tristate "VOP Bus Driver"
+	select DMA_OPS
 	help
 	  This option is selected by any driver which registers a
 	  device or driver on the VOP Bus, such as CONFIG_INTEL_MIC_HOST
@@ -49,6 +52,7 @@ config INTEL_MIC_HOST
 	tristate "Intel MIC Host Driver"
 	depends on 64BIT && PCI && X86
 	depends on INTEL_MIC_BUS && SCIF_BUS && MIC_COSM && VOP_BUS
+	select DMA_OPS
 	help
 	  This enables Host Driver support for the Intel Many Integrated
 	  Core (MIC) family of PCIe form factor coprocessor devices that
drivers/vdpa/Kconfig
@@ -11,6 +11,7 @@ if VDPA
 config VDPA_SIM
 	tristate "vDPA device simulator"
 	depends on RUNTIME_TESTING_MENU && HAS_DMA
+	select DMA_OPS
 	select VHOST_RING
 	default n
 	help
drivers/xen/Kconfig
@@ -179,6 +179,7 @@ config XEN_GRANT_DMA_ALLOC

 config SWIOTLB_XEN
 	def_bool y
+	select DMA_OPS
 	select SWIOTLB

 config XEN_PCIDEV_BACKEND
include/linux/device.h
@@ -525,6 +525,11 @@ struct dev_links_info {
  *		sync_state() callback.
  * @dma_coherent: this particular device is dma coherent, even if the
  *		architecture supports non-coherent devices.
+ * @dma_ops_bypass: If set to %true then the dma_ops are bypassed for the
+ *		streaming DMA operations (->map_* / ->unmap_* / ->sync_*),
+ *		and optionally (if the coherent mask is large enough) also
+ *		for dma allocations.  This flag is managed by the dma ops
+ *		instance from ->dma_supported.
  *
  * At the lowest level, every device in a Linux system is represented by an
  * instance of struct device. The device structure contains the information
@@ -574,8 +579,9 @@ struct device {
 #ifdef CONFIG_GENERIC_MSI_IRQ
 	struct list_head	msi_list;
 #endif
-
+#ifdef CONFIG_DMA_OPS
 	const struct dma_map_ops *dma_ops;
+#endif
 	u64		*dma_mask;	/* dma mask (if dma'able device) */
 	u64		coherent_dma_mask;/* Like dma_mask, but for
					     alloc_coherent mappings as
@@ -628,6 +634,9 @@ struct device {
 			defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
 	bool			dma_coherent:1;
 #endif
+#ifdef CONFIG_DMA_OPS_BYPASS
+	bool			dma_ops_bypass : 1;
+#endif
 };

 static inline struct device *kobj_to_dev(struct kobject *kobj)
include/linux/dma-direct.h
@@ -1,10 +1,16 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Internals of the DMA direct mapping implementation.  Only for use by the
+ * DMA mapping code and IOMMU drivers.
+ */
 #ifndef _LINUX_DMA_DIRECT_H
 #define _LINUX_DMA_DIRECT_H 1

 #include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/memblock.h> /* for min_low_pfn */
 #include <linux/mem_encrypt.h>
+#include <linux/swiotlb.h>

 extern unsigned int zone_dma_bits;

@@ -87,4 +93,102 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 		unsigned long attrs);
 int dma_direct_supported(struct device *dev, u64 mask);
 bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		enum dma_data_direction dir, unsigned long attrs);
+dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+size_t dma_direct_max_mapping_size(struct device *dev);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+    defined(CONFIG_SWIOTLB)
+void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+    defined(CONFIG_SWIOTLB)
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_unmap_sg(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+}
+static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
+static inline void dma_direct_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	phys_addr_t paddr = dma_to_phys(dev, addr);
+
+	if (unlikely(is_swiotlb_buffer(paddr)))
+		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
+
+	if (!dev_is_dma_coherent(dev))
+		arch_sync_dma_for_device(paddr, size, dir);
+}
+
+static inline void dma_direct_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+	phys_addr_t paddr = dma_to_phys(dev, addr);
+
+	if (!dev_is_dma_coherent(dev)) {
+		arch_sync_dma_for_cpu(paddr, size, dir);
+		arch_sync_dma_for_cpu_all();
+	}
+
+	if (unlikely(is_swiotlb_buffer(paddr)))
+		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
+}
+
+static inline dma_addr_t dma_direct_map_page(struct device *dev,
+		struct page *page, unsigned long offset, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	phys_addr_t phys = page_to_phys(page) + offset;
+	dma_addr_t dma_addr = phys_to_dma(dev, phys);
+
+	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
+		return swiotlb_map(dev, phys, size, dir, attrs);
+
+	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+		if (swiotlb_force != SWIOTLB_NO_FORCE)
+			return swiotlb_map(dev, phys, size, dir, attrs);
+
+		dev_WARN_ONCE(dev, 1,
+			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+		return DMA_MAPPING_ERROR;
+	}
+
+	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		arch_sync_dma_for_device(phys, size, dir);
+	return dma_addr;
+}
+
+static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	phys_addr_t phys = dma_to_phys(dev, addr);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+
+	if (unlikely(is_swiotlb_buffer(phys)))
+		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
+}
 #endif /* _LINUX_DMA_DIRECT_H */
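With these helpers now static inlines in <linux/dma-direct.h>, the out-of-line DMA API in kernel/dma/mapping.c (below) compiles the whole direct-mapping fast path into a single function body, with no indirect call through dma_map_ops. A sketch of what the compiler effectively sees for a direct-mapped device; debug hooks are omitted and this is a condensation, not verbatim kernel code:

/* Effective shape of dma_map_page_attrs() after inlining (sketch). */
dma_addr_t dma_map_page_attrs_sketch(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		/* body inlined from <linux/dma-direct.h>: phys_to_dma(),
		 * dma_capable() check, optional swiotlb bounce, cache sync */
		return dma_direct_map_page(dev, page, offset, size, dir, attrs);
	return ops->map_page(dev, page, offset, size, dir, attrs);
}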
include/linux/dma-mapping.h
@@ -188,76 +188,10 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
 }
 #endif /* CONFIG_DMA_DECLARE_COHERENT */

-static inline bool dma_is_direct(const struct dma_map_ops *ops)
-{
-	return likely(!ops);
-}
-
-/*
- * All the dma_direct_* declarations are here just for the indirect call bypass,
- * and must not be used directly drivers!
- */
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs);
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-		enum dma_data_direction dir, unsigned long attrs);
-dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
-    defined(CONFIG_SWIOTLB)
-void dma_direct_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
-#else
-static inline void dma_direct_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_direct_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-}
-#endif
-
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
-    defined(CONFIG_SWIOTLB)
-void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_sync_single_for_cpu(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
-#else
-static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-}
-static inline void dma_direct_unmap_sg(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-}
-static inline void dma_direct_sync_single_for_cpu(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-}
-static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-}
-#endif
-
-size_t dma_direct_max_mapping_size(struct device *dev);
-
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>

+#ifdef CONFIG_DMA_OPS
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (dev->dma_ops)
@@ -270,165 +204,16 @@ static inline void set_dma_ops(struct device *dev,
 {
 	dev->dma_ops = dma_ops;
 }
-
-static inline dma_addr_t dma_map_page_attrs(struct device *dev,
-		struct page *page, size_t offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
-	else
-		addr = ops->map_page(dev, page, offset, size, dir, attrs);
-	debug_dma_map_page(dev, page, offset, size, dir, addr);
-
-	return addr;
-}
-
-static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		dma_direct_unmap_page(dev, addr, size, dir, attrs);
-	else if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, attrs);
-	debug_dma_unmap_page(dev, addr, size, dir);
-}
-
-/*
- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
- * It should never return a value < 0.
- */
-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	int ents;
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
-	else
-		ents = ops->map_sg(dev, sg, nents, dir, attrs);
-	BUG_ON(ents < 0);
-	debug_dma_map_sg(dev, sg, nents, ents, dir);
-
-	return ents;
-}
-
-static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	debug_dma_unmap_sg(dev, sg, nents, dir);
-	if (dma_is_direct(ops))
-		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
-	else if (ops->unmap_sg)
-		ops->unmap_sg(dev, sg, nents, dir, attrs);
-}
-
-static inline dma_addr_t dma_map_resource(struct device *dev,
-		phys_addr_t phys_addr,
-		size_t size,
-		enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	dma_addr_t addr = DMA_MAPPING_ERROR;
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	/* Don't allow RAM to be mapped */
-	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
-		return DMA_MAPPING_ERROR;
-
-	if (dma_is_direct(ops))
-		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
-	else if (ops->map_resource)
-		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
-
-	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
-	return addr;
-}
-
-static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (!dma_is_direct(ops) && ops->unmap_resource)
-		ops->unmap_resource(dev, addr, size, dir, attrs);
-	debug_dma_unmap_resource(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
-		size_t size,
-		enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
-	else if (ops->sync_single_for_cpu)
-		ops->sync_single_for_cpu(dev, addr, size, dir);
-	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size,
-		enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		dma_direct_sync_single_for_device(dev, addr, size, dir);
-	else if (ops->sync_single_for_device)
-		ops->sync_single_for_device(dev, addr, size, dir);
-	debug_dma_sync_single_for_device(dev, addr, size, dir);
-}
-
-static inline void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-		int nelems, enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
-	else if (ops->sync_sg_for_cpu)
-		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
-	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
-}
-
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-		int nelems, enum dma_data_direction dir)
-{
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!valid_dma_direction(dir));
-	if (dma_is_direct(ops))
-		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
-	else if (ops->sync_sg_for_device)
-		ops->sync_sg_for_device(dev, sg, nelems, dir);
-	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
-
-}
+#else /* CONFIG_DMA_OPS */
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	return NULL;
+}
+static inline void set_dma_ops(struct device *dev,
+		const struct dma_map_ops *dma_ops)
+{
+}
+#endif /* CONFIG_DMA_OPS */

 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
@@ -439,6 +224,28 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return 0;
 }

+dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
+		size_t offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs);
+void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs);
+int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir,
+		unsigned long attrs);
+dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs);
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir);
+void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir);
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir);
 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t flag, unsigned long attrs);
 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
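Driver call sites are unaffected by the move out of line: the familiar wrappers still resolve to the dma_*_attrs() entry points declared above. A minimal usage sketch; my_dev, buf, and len are illustrative placeholders, and error handling is trimmed:

/* Minimal streaming-DMA usage sketch. */
dma_addr_t handle = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(my_dev, handle))
	return -ENOMEM;
/* ... point the hardware at "handle" and start the transfer ... */
dma_unmap_single(my_dev, handle, len, DMA_TO_DEVICE);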
kernel/dma/Kconfig
@@ -5,6 +5,17 @@ config HAS_DMA
 	depends on !NO_DMA
 	default y

+config DMA_OPS
+	bool
+
+#
+# IOMMU drivers that can bypass the IOMMU code and optionally use the direct
+# mapping fast path should select this option and set the dma_ops_bypass
+# flag in struct device where applicable
+#
+config DMA_OPS_BYPASS
+	bool
+
 config NEED_SG_DMA_LENGTH
 	bool

@@ -60,6 +71,7 @@ config DMA_NONCOHERENT_CACHE_SYNC
 config DMA_VIRT_OPS
 	bool
 	depends on HAS_DMA
+	select DMA_OPS

 config SWIOTLB
 	bool
kernel/dma/Makefile
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0

-obj-$(CONFIG_HAS_DMA)			+= mapping.o direct.o dummy.o
+obj-$(CONFIG_HAS_DMA)			+= mapping.o direct.o
+obj-$(CONFIG_DMA_OPS)			+= dummy.o
 obj-$(CONFIG_DMA_CMA)			+= contiguous.o
 obj-$(CONFIG_DMA_DECLARE_COHERENT)	+= coherent.o
 obj-$(CONFIG_DMA_VIRT_OPS)		+= virt.o
kernel/dma/contiguous.c
@@ -215,6 +215,13 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 	return cma_release(dev_get_cma_area(dev), pages, count);
 }

+static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
+{
+	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);
+
+	return cma_alloc(cma, size >> PAGE_SHIFT, align, gfp & __GFP_NOWARN);
+}
+
 /**
  * dma_alloc_contiguous() - allocate contiguous pages
  * @dev:   Pointer to device for which the allocation is performed.
@@ -231,24 +238,14 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
  */
 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
 {
-	size_t count = size >> PAGE_SHIFT;
-	struct page *page = NULL;
-	struct cma *cma = NULL;
-
-	if (dev && dev->cma_area)
-		cma = dev->cma_area;
-	else if (count > 1)
-		cma = dma_contiguous_default_area;
-
-	/* CMA can be used only in the context which permits sleeping */
-	if (cma && gfpflags_allow_blocking(gfp)) {
-		size_t align = get_order(size);
-		size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
-
-		page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
-	}
-
-	return page;
+	/* CMA can be used only in the context which permits sleeping */
+	if (!gfpflags_allow_blocking(gfp))
+		return NULL;
+	if (dev->cma_area)
+		return cma_alloc_aligned(dev->cma_area, size, gfp);
+	if (size <= PAGE_SIZE || !dma_contiguous_default_area)
+		return NULL;
+	return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);
 }

 /**
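The new cma_alloc_aligned() helper centralizes the alignment computation the old body open-coded: align to the allocation's page order, capped at CONFIG_CMA_ALIGNMENT. A standalone, userspace-style sketch of that arithmetic; get_order() is reimplemented here for illustration, and the cap of 8 is an assumption matching CONFIG_CMA_ALIGNMENT's usual default:

#include <stdio.h>

/* Reimplementation of the kernel's get_order() for illustration:
 * smallest order such that (4096 << order) >= size. */
static unsigned int get_order_sketch(size_t size)
{
	unsigned int order = 0;
	size_t pages = (size + 4095) / 4096;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	const unsigned int cma_alignment = 8;	/* assumed CONFIG_CMA_ALIGNMENT */
	size_t size = 16 << 20;			/* 16 MiB buffer */
	unsigned int align = get_order_sketch(size);

	if (align > cma_alignment)		/* the min() clamp */
		align = cma_alignment;
	printf("order %u -> CMA align order %u\n", get_order_sketch(size), align);
	return 0;
}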
kernel/dma/debug.c
@@ -144,8 +144,12 @@ static const char *type2name[] = {
 	[dma_debug_resource] = "resource",
 };

-static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
-				   "DMA_FROM_DEVICE", "DMA_NONE" };
+static const char *dir2name[] = {
+	[DMA_BIDIRECTIONAL]	= "DMA_BIDIRECTIONAL",
+	[DMA_TO_DEVICE]		= "DMA_TO_DEVICE",
+	[DMA_FROM_DEVICE]	= "DMA_FROM_DEVICE",
+	[DMA_NONE]		= "DMA_NONE",
+};

 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
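Named (designated) initializers tie each string to its enum value, so the table stays correct even if enum dma_data_direction were ever reordered or grew holes; positional initializers would silently mislabel in that case. A self-contained illustration of the difference:

#include <stdio.h>

enum dir { DIR_BI = 0, DIR_TO = 1, DIR_FROM = 2, DIR_NONE = 3 };

/* Positional: correctness depends on the declaration order staying fixed. */
static const char *positional[] = { "BI", "TO", "FROM", "NONE" };

/* Designated: each slot is pinned to its enumerator, order-independent. */
static const char *designated[] = {
	[DIR_FROM] = "FROM",
	[DIR_BI]   = "BI",
	[DIR_NONE] = "NONE",
	[DIR_TO]   = "TO",
};

int main(void)
{
	/* Both print "FROM FROM" today; only the designated table would
	 * survive the enum being renumbered. */
	printf("%s %s\n", positional[DIR_FROM], designated[DIR_FROM]);
	return 0;
}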
kernel/dma/direct.c
@@ -10,11 +10,9 @@
 #include <linux/dma-direct.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-contiguous.h>
-#include <linux/dma-noncoherent.h>
 #include <linux/pfn.h>
 #include <linux/vmalloc.h>
 #include <linux/set_memory.h>
-#include <linux/swiotlb.h>

 /*
  * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use it
@@ -304,19 +302,6 @@ void dma_direct_free(struct device *dev, size_t size,

 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
     defined(CONFIG_SWIOTLB)
-void dma_direct_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-	phys_addr_t paddr = dma_to_phys(dev, addr);
-
-	if (unlikely(is_swiotlb_buffer(paddr)))
-		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
-
-	if (!dev_is_dma_coherent(dev))
-		arch_sync_dma_for_device(paddr, size, dir);
-}
-EXPORT_SYMBOL(dma_direct_sync_single_for_device);
-
 void dma_direct_sync_sg_for_device(struct device *dev,
 		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
 {
@@ -335,27 +320,11 @@ void dma_direct_sync_sg_for_device(struct device *dev,
 				dir);
 	}
 }
-EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
 #endif

 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
     defined(CONFIG_SWIOTLB)
-void dma_direct_sync_single_for_cpu(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-	phys_addr_t paddr = dma_to_phys(dev, addr);
-
-	if (!dev_is_dma_coherent(dev)) {
-		arch_sync_dma_for_cpu(paddr, size, dir);
-		arch_sync_dma_for_cpu_all();
-	}
-
-	if (unlikely(is_swiotlb_buffer(paddr)))
-		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
-}
-EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);
-
 void dma_direct_sync_sg_for_cpu(struct device *dev,
 		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
 {
@@ -376,20 +345,6 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 	if (!dev_is_dma_coherent(dev))
 		arch_sync_dma_for_cpu_all();
 }
-EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
-
-void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	phys_addr_t phys = dma_to_phys(dev, addr);
-
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
-
-	if (unlikely(is_swiotlb_buffer(phys)))
-		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
-}
-EXPORT_SYMBOL(dma_direct_unmap_page);
-
 void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
@@ -401,35 +356,8 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
 			     attrs);
 }
-EXPORT_SYMBOL(dma_direct_unmap_sg);
 #endif

-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	phys_addr_t phys = page_to_phys(page) + offset;
-	dma_addr_t dma_addr = phys_to_dma(dev, phys);
-
-	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
-		return swiotlb_map(dev, phys, size, dir, attrs);
-
-	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
-		if (swiotlb_force != SWIOTLB_NO_FORCE)
-			return swiotlb_map(dev, phys, size, dir, attrs);
-
-		dev_WARN_ONCE(dev, 1,
-			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
-			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
-		return DMA_MAPPING_ERROR;
-	}
-
-	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		arch_sync_dma_for_device(phys, size, dir);
-	return dma_addr;
-}
-EXPORT_SYMBOL(dma_direct_map_page);
-
 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 		enum dma_data_direction dir, unsigned long attrs)
 {
@@ -450,7 +378,6 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
 	return 0;
 }
-EXPORT_SYMBOL(dma_direct_map_sg);

 dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
@@ -467,7 +394,6 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,

 	return dma_addr;
 }
-EXPORT_SYMBOL(dma_direct_map_resource);

 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
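One reading aid for dma_direct_map_page() as it moves (unchanged in logic) into the header: the mapping decision is a three-step ladder. This is a condensed restatement of the code shown above, not a separate implementation:

/* Condensed decision ladder from dma_direct_map_page() (sketch). */
if (swiotlb_force == SWIOTLB_FORCE)
	return swiotlb_map(dev, phys, size, dir, attrs);	/* always bounce */
if (!dma_capable(dev, dma_addr, size, true)) {
	if (swiotlb_force != SWIOTLB_NO_FORCE)
		return swiotlb_map(dev, phys, size, dir, attrs);/* bounce */
	return DMA_MAPPING_ERROR;	/* device cannot reach the address */
}
return dma_addr;			/* direct mapping is fine */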
kernel/dma/mapping.c
@@ -105,6 +105,196 @@ void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 }
 EXPORT_SYMBOL(dmam_alloc_attrs);

+static bool dma_go_direct(struct device *dev, dma_addr_t mask,
+		const struct dma_map_ops *ops)
+{
+	if (likely(!ops))
+		return true;
+#ifdef CONFIG_DMA_OPS_BYPASS
+	if (dev->dma_ops_bypass)
+		return min_not_zero(mask, dev->bus_dma_limit) >=
+			    dma_direct_get_required_mask(dev);
+#endif
+	return false;
+}
+
+
+/*
+ * Check if the devices uses a direct mapping for streaming DMA operations.
+ * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
+ * enough.
+ */
+static inline bool dma_alloc_direct(struct device *dev,
+		const struct dma_map_ops *ops)
+{
+	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
+}
+
+static inline bool dma_map_direct(struct device *dev,
+		const struct dma_map_ops *ops)
+{
+	return dma_go_direct(dev, *dev->dma_mask, ops);
+}
+
+dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
+		size_t offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr;
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	else
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	debug_dma_map_page(dev, page, offset, size, dir, addr);
+
+	return addr;
+}
+EXPORT_SYMBOL(dma_map_page_attrs);
+
+void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_unmap_page(dev, addr, size, dir, attrs);
+	else if (ops->unmap_page)
+		ops->unmap_page(dev, addr, size, dir, attrs);
+	debug_dma_unmap_page(dev, addr, size, dir);
+}
+EXPORT_SYMBOL(dma_unmap_page_attrs);
+
+/*
+ * dma_maps_sg_attrs returns 0 on error and > 0 on success.
+ * It should never return a value < 0.
+ */
+int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	int ents;
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+	else
+		ents = ops->map_sg(dev, sg, nents, dir, attrs);
+	BUG_ON(ents < 0);
+	debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+	return ents;
+}
+EXPORT_SYMBOL(dma_map_sg_attrs);
+
+void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+	if (dma_map_direct(dev, ops))
+		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+	else if (ops->unmap_sg)
+		ops->unmap_sg(dev, sg, nents, dir, attrs);
+}
+EXPORT_SYMBOL(dma_unmap_sg_attrs);
+
+dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	dma_addr_t addr = DMA_MAPPING_ERROR;
+
+	BUG_ON(!valid_dma_direction(dir));
+
+	/* Don't allow RAM to be mapped */
+	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
+		return DMA_MAPPING_ERROR;
+
+	if (dma_map_direct(dev, ops))
+		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
+	else if (ops->map_resource)
+		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
+
+	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+	return addr;
+}
+EXPORT_SYMBOL(dma_map_resource);
+
+void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
+		ops->unmap_resource(dev, addr, size, dir, attrs);
+	debug_dma_unmap_resource(dev, addr, size, dir);
+}
+EXPORT_SYMBOL(dma_unmap_resource);
+
+void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+	else if (ops->sync_single_for_cpu)
+		ops->sync_single_for_cpu(dev, addr, size, dir);
+	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+EXPORT_SYMBOL(dma_sync_single_for_cpu);
+
+void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_sync_single_for_device(dev, addr, size, dir);
+	else if (ops->sync_single_for_device)
+		ops->sync_single_for_device(dev, addr, size, dir);
+	debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+EXPORT_SYMBOL(dma_sync_single_for_device);
+
+void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_cpu)
+		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction dir)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	BUG_ON(!valid_dma_direction(dir));
+	if (dma_map_direct(dev, ops))
+		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_device)
+		ops->sync_sg_for_device(dev, sg, nelems, dir);
+	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
+
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
@@ -138,7 +328,7 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);

-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
 				size, attrs);
 	if (!ops->get_sgtable)
@@ -208,7 +398,7 @@ bool dma_can_mmap(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);

-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		return dma_direct_can_mmap(dev);
 	return ops->mmap != NULL;
 }
@@ -233,7 +423,7 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);

-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
 				attrs);
 	if (!ops->mmap)
@@ -246,7 +436,7 @@ u64 dma_get_required_mask(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);

-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		return dma_direct_get_required_mask(dev);
 	if (ops->get_required_mask)
 		return ops->get_required_mask(dev);
@@ -277,7 +467,7 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	/* let the implementation decide on the zone to allocate from: */
 	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
 	else if (ops->alloc)
 		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
@@ -309,7 +499,7 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;

 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
 	else if (ops->free)
 		ops->free(dev, size, cpu_addr, dma_handle, attrs);
@@ -320,7 +510,11 @@ int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);

-	if (dma_is_direct(ops))
+	/*
+	 * ->dma_supported sets the bypass flag, so we must always call
+	 * into the method here unless the device is truly direct mapped.
+	 */
+	if (!ops)
 		return dma_direct_supported(dev, mask);
 	if (!ops->dma_supported)
 		return 1;
@@ -376,7 +570,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,

 	BUG_ON(!valid_dma_direction(dir));

-	if (dma_is_direct(ops))
+	if (dma_alloc_direct(dev, ops))
 		arch_dma_cache_sync(dev, vaddr, size, dir);
 	else if (ops->cache_sync)
 		ops->cache_sync(dev, vaddr, size, dir);
@@ -388,7 +582,7 @@ size_t dma_max_mapping_size(struct device *dev)
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	size_t size = SIZE_MAX;

-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		size = dma_direct_max_mapping_size(dev);
 	else if (ops && ops->max_mapping_size)
 		size = ops->max_mapping_size(dev);
@@ -401,7 +595,7 @@ bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);

-	if (dma_is_direct(ops))
+	if (dma_map_direct(dev, ops))
 		return dma_direct_need_sync(dev, dma_addr);
 	return ops->sync_single_for_cpu || ops->sync_single_for_device;
 }
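Finally, dma_need_sync() (kept at the bottom of mapping.c above and now bypass-aware via dma_map_direct()) lets a driver skip per-buffer sync calls on coherent, direct-mapped setups. A hedged usage sketch; my_dev, handle, and len are illustrative placeholders:

/* Illustrative use of dma_need_sync(); names are placeholders. */
bool must_sync = dma_need_sync(my_dev, handle);

/* ... device writes into the buffer ... */
if (must_sync)
	dma_sync_single_for_cpu(my_dev, handle, len, DMA_FROM_DEVICE);
/* the CPU may now safely read the data */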