IOMMU Updates for Linux v4.7
The updates include:

 * Rate limiting for the VT-d fault handler
 * Remove statistics code from the AMD IOMMU driver. It is unused and
   should be replaced by something more generic if needed
 * Per-domain pagesize-bitmaps in IOMMU core code to support systems
   with different types of IOMMUs
 * Support for ACPI devices in the AMD IOMMU driver
 * 4GB mode support for Mediatek IOMMU driver
 * ARM-SMMU updates from Will Deacon:
   - Support for 64k pages with SMMUv1 implementations (e.g. MMU-401)
   - Remove open-coded 64-bit MMIO accessors
   - Initial support for 16-bit VMIDs, as supported by some ThunderX
     SMMU implementations
   - A couple of errata workarounds for silicon in the field
 * Various fixes here and there

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQIcBAABAgAGBQJXPeM1AAoJECvwRC2XARrjA2QP/2Cz+pVkpQCuvhAse57eN4rB
wWXKTjqSFZ4PcA3Vu5yvX6XMv15g46xXFJAhf2spE5//8+xgFfYBgkBRpnqu1brw
SL6f8A912MnfMRgWqcdKkJNeHbiN0kOvcIQv1J8GNfciqMiyYFhiLP6fFiRmWR/F
XDBjUeFZ5+Uwf1BAGqw0cVPexeakEbsLHUGqxFsh5g2T4i43aHzO2HJT3IdwWHDt
F2ivs8gNFGBeJEyzhW8TD0rOEEyHAnM3N18qPEU9+dD0UmjnTQPymEZSbsGW5d4j
Cn40QYlA+Zmbwgx6LaDVChzQyRJu6O3uvFThyRviiYKCri/Nc9cUT4vHsFGU4MXb
1d3bqrgzaw7vw31BN7S1Py3MV+WpVnEYjFm2O+hW28OjtSpm6ZvbI8wc0rF4UT/I
KgL0gSeA8tp25uVISM+ktpIrObYsAcoCz8nvurpDv2AGkKRzhyoSze0Jg43rusD8
BH7iFWu1LRPlulTGlrHMtNmbZeEApUPbObcQAOcrBOj9vjuFaZ8qduZmB+hwS2iV
p9atn+54LmGO0LuzqsGrhApIeXTeTZSrGyjlbUADWBJlTw8Xyk/CR39Wf3m/Xmpr
DiJ/5oa8SKQtNbwvbScn1+sInNWP/pH/JgnRO3Yvqth8HWF/DlpzNj5XxAB8czwr
qjk9WjpEXun50ocPFQeS
=jpPD
-----END PGP SIGNATURE-----

Merge tag 'iommu-updates-v4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "The updates include:

   - rate limiting for the VT-d fault handler
   - remove statistics code from the AMD IOMMU driver. It is unused
     and should be replaced by something more generic if needed
   - per-domain pagesize-bitmaps in IOMMU core code to support systems
     with different types of IOMMUs
   - support for ACPI devices in the AMD IOMMU driver
   - 4GB mode support for Mediatek IOMMU driver
   - ARM-SMMU updates from Will Deacon:
      - support for 64k pages with SMMUv1 implementations (e.g. MMU-401)
      - remove open-coded 64-bit MMIO accessors
      - initial support for 16-bit VMIDs, as supported by some ThunderX
        SMMU implementations
      - a couple of errata workarounds for silicon in the field
   - various fixes here and there"

* tag 'iommu-updates-v4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (44 commits)
  iommu/arm-smmu: Use per-domain page sizes.
  iommu/amd: Remove statistics code
  iommu/dma: Finish optimising higher-order allocations
  iommu: Allow selecting page sizes per domain
  iommu: of: enforce const-ness of struct iommu_ops
  iommu: remove unused priv field from struct iommu_ops
  iommu/dma: Implement scatterlist segment merging
  iommu/arm-smmu: Clear cache lock bit of ACR
  iommu/arm-smmu: Support SMMUv1 64KB supplement
  iommu/arm-smmu: Decouple context format from kernel config
  iommu/arm-smmu: Tidy up 64-bit/atomic I/O accesses
  io-64-nonatomic: Add relaxed accessor variants
  iommu/arm-smmu: Work around MMU-500 prefetch errata
  iommu/arm-smmu: Convert ThunderX workaround to new method
  iommu/arm-smmu: Differentiate specific implementations
  iommu/arm-smmu: Workaround for ThunderX erratum #27704
  iommu/arm-smmu: Add support for 16 bit VMID
  iommu/amd: Move get_device_id() and friends to beginning of file
  iommu/amd: Don't use IS_ERR_VALUE to check integer values
  iommu/amd: Signedness bug in acpihid_device_group()
  ...
This commit is contained in: commit e0fb1b3639
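One headline item above is the per-domain page-size bitmap work ("iommu: Allow selecting page sizes per domain"), which the ARM SMMU changes in this merge rely on so that SMMUs with different capabilities can coexist in one system. The fragment below is only a rough sketch of what that enables for an IOMMU API consumer; it assumes the pgsize_bitmap field this series adds to struct iommu_domain and is not code taken from the patches themselves:

	#include <linux/bitops.h>
	#include <linux/iommu.h>
	#include <linux/printk.h>

	/* Report the smallest page size a given domain can map. */
	static void report_domain_page_sizes(struct iommu_domain *domain)
	{
		/*
		 * After this merge each domain carries its own bitmap instead of
		 * sharing the driver-global iommu_ops->pgsize_bitmap.
		 */
		unsigned long pgsizes = domain->pgsize_bitmap;
		size_t min_pgsize = 1UL << __ffs(pgsizes);

		pr_info("domain supports pages from %zu bytes (bitmap %#lx)\n",
			min_pgsize, pgsizes);
	}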
@@ -53,7 +53,9 @@ stable kernels.
 | ARM            | Cortex-A57      | #832075         | ARM64_ERRATUM_832075 |
 | ARM            | Cortex-A57      | #852523         | N/A                  |
 | ARM            | Cortex-A57      | #834220         | ARM64_ERRATUM_834220 |
+| ARM            | MMU-500         | #841119,#826419 | N/A                  |
 |                |                 |                 |                      |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375 |
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154 |
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456 |
+| Cavium         | ThunderX SMMUv2 | #27704          | N/A                  |

@@ -16,6 +16,7 @@ conditions.
                         "arm,mmu-400"
                         "arm,mmu-401"
                         "arm,mmu-500"
+                        "cavium,smmu-v2"

                   depending on the particular implementation and/or the
                   version of the architecture implemented.

@@ -1787,6 +1787,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			PCI device 00:14.0 write the parameter as:
 			ivrs_hpet[0]=00:14.0

+	ivrs_acpihid	[HW,X86_64]
+			Provide an override to the ACPI-HID:UID<->DEVICE-ID
+			mapping provided in the IVRS ACPI table. For
+			example, to map UART-HID:UID AMD0020:0 to
+			PCI device 00:14.5 write the parameter as:
+			ivrs_acpihid[00:14.5]=AMD0020:0
+
 	js=		[HW,JOY] Analog joystick
 			See Documentation/input/joystick.txt.

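For context, the ivrs_acpihid override added above is parsed at early boot like the existing ivrs_ioapic/ivrs_hpet options, so several of them can sit on one kernel command line. A hypothetical example follows; only the AMD0020:0 to 00:14.5 mapping is taken from the documentation above, the IOAPIC id and its address are placeholders:

	ivrs_acpihid[00:14.5]=AMD0020:0 ivrs_ioapic[4]=00:14.0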
@@ -118,7 +118,7 @@ static inline unsigned long dma_max_pfn(struct device *dev)

 #define arch_setup_dma_ops arch_setup_dma_ops
 extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			       struct iommu_ops *iommu, bool coherent);
+			       const struct iommu_ops *iommu, bool coherent);

 #define arch_teardown_dma_ops arch_teardown_dma_ops
 extern void arch_teardown_dma_ops(struct device *dev);

@ -2215,7 +2215,7 @@ static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
|
|||
}
|
||||
|
||||
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
struct iommu_ops *iommu)
|
||||
const struct iommu_ops *iommu)
|
||||
{
|
||||
struct dma_iommu_mapping *mapping;
|
||||
|
||||
|
@ -2253,7 +2253,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
|
|||
#else
|
||||
|
||||
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
struct iommu_ops *iommu)
|
||||
const struct iommu_ops *iommu)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
@ -2270,7 +2270,7 @@ static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
|
|||
}
|
||||
|
||||
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
struct iommu_ops *iommu, bool coherent)
|
||||
const struct iommu_ops *iommu, bool coherent)
|
||||
{
|
||||
struct dma_map_ops *dma_ops;
|
||||
|
||||
|
|
|
@ -48,7 +48,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
|||
}
|
||||
|
||||
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
struct iommu_ops *iommu, bool coherent);
|
||||
const struct iommu_ops *iommu, bool coherent);
|
||||
#define arch_setup_dma_ops arch_setup_dma_ops
|
||||
|
||||
#ifdef CONFIG_IOMMU_DMA
|
||||
|
|
|
@ -562,8 +562,8 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
|
|||
struct page **pages;
|
||||
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
|
||||
|
||||
pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
|
||||
flush_page);
|
||||
pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
|
||||
handle, flush_page);
|
||||
if (!pages)
|
||||
return NULL;
|
||||
|
||||
|
@ -947,13 +947,13 @@ void arch_teardown_dma_ops(struct device *dev)
|
|||
#else
|
||||
|
||||
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
struct iommu_ops *iommu)
|
||||
const struct iommu_ops *iommu)
|
||||
{ }
|
||||
|
||||
#endif /* CONFIG_IOMMU_DMA */
|
||||
|
||||
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
|
||||
struct iommu_ops *iommu, bool coherent)
|
||||
const struct iommu_ops *iommu, bool coherent)
|
||||
{
|
||||
if (!dev->archdata.dma_ops)
|
||||
dev->archdata.dma_ops = &swiotlb_dma_ops;
|
||||
|
|
|
@ -76,8 +76,7 @@ config IOMMU_DMA
|
|||
|
||||
config FSL_PAMU
|
||||
bool "Freescale IOMMU support"
|
||||
depends on PPC32
|
||||
depends on PPC_E500MC || COMPILE_TEST
|
||||
depends on PPC_E500MC || (COMPILE_TEST && PPC)
|
||||
select IOMMU_API
|
||||
select GENERIC_ALLOCATOR
|
||||
help
|
||||
|
@ -124,16 +123,6 @@ config AMD_IOMMU
|
|||
your BIOS for an option to enable it or if you have an IVRS ACPI
|
||||
table.
|
||||
|
||||
config AMD_IOMMU_STATS
|
||||
bool "Export AMD IOMMU statistics to debugfs"
|
||||
depends on AMD_IOMMU
|
||||
select DEBUG_FS
|
||||
---help---
|
||||
This option enables code in the AMD IOMMU driver to collect various
|
||||
statistics about whats happening in the driver and exports that
|
||||
information to userspace via debugfs.
|
||||
If unsure, say N.
|
||||
|
||||
config AMD_IOMMU_V2
|
||||
tristate "AMD IOMMU Version 2 driver"
|
||||
depends on AMD_IOMMU
|
||||
|
|
|
@ -19,6 +19,8 @@
|
|||
|
||||
#include <linux/ratelimit.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/amba/bus.h>
|
||||
#include <linux/pci-ats.h>
|
||||
#include <linux/bitmap.h>
|
||||
#include <linux/slab.h>
|
||||
|
@ -72,6 +74,7 @@ static DEFINE_SPINLOCK(dev_data_list_lock);
|
|||
|
||||
LIST_HEAD(ioapic_map);
|
||||
LIST_HEAD(hpet_map);
|
||||
LIST_HEAD(acpihid_map);
|
||||
|
||||
/*
|
||||
* Domain for untranslated devices - only allocated
|
||||
|
@ -162,18 +165,65 @@ struct dma_ops_domain {
|
|||
*
|
||||
****************************************************************************/
|
||||
|
||||
static struct protection_domain *to_pdomain(struct iommu_domain *dom)
|
||||
static inline int match_hid_uid(struct device *dev,
|
||||
struct acpihid_map_entry *entry)
|
||||
{
|
||||
return container_of(dom, struct protection_domain, domain);
|
||||
const char *hid, *uid;
|
||||
|
||||
hid = acpi_device_hid(ACPI_COMPANION(dev));
|
||||
uid = acpi_device_uid(ACPI_COMPANION(dev));
|
||||
|
||||
if (!hid || !(*hid))
|
||||
return -ENODEV;
|
||||
|
||||
if (!uid || !(*uid))
|
||||
return strcmp(hid, entry->hid);
|
||||
|
||||
if (!(*entry->uid))
|
||||
return strcmp(hid, entry->hid);
|
||||
|
||||
return (strcmp(hid, entry->hid) || strcmp(uid, entry->uid));
|
||||
}
|
||||
|
||||
static inline u16 get_device_id(struct device *dev)
|
||||
static inline u16 get_pci_device_id(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
|
||||
return PCI_DEVID(pdev->bus->number, pdev->devfn);
|
||||
}
|
||||
|
||||
static inline int get_acpihid_device_id(struct device *dev,
|
||||
struct acpihid_map_entry **entry)
|
||||
{
|
||||
struct acpihid_map_entry *p;
|
||||
|
||||
list_for_each_entry(p, &acpihid_map, list) {
|
||||
if (!match_hid_uid(dev, p)) {
|
||||
if (entry)
|
||||
*entry = p;
|
||||
return p->devid;
|
||||
}
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline int get_device_id(struct device *dev)
|
||||
{
|
||||
int devid;
|
||||
|
||||
if (dev_is_pci(dev))
|
||||
devid = get_pci_device_id(dev);
|
||||
else
|
||||
devid = get_acpihid_device_id(dev, NULL);
|
||||
|
||||
return devid;
|
||||
}
|
||||
|
||||
static struct protection_domain *to_pdomain(struct iommu_domain *dom)
|
||||
{
|
||||
return container_of(dom, struct protection_domain, domain);
|
||||
}
|
||||
|
||||
static struct iommu_dev_data *alloc_dev_data(u16 devid)
|
||||
{
|
||||
struct iommu_dev_data *dev_data;
|
||||
|
@ -222,6 +272,7 @@ static u16 get_alias(struct device *dev)
|
|||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
u16 devid, ivrs_alias, pci_alias;
|
||||
|
||||
/* The callers make sure that get_device_id() does not fail here */
|
||||
devid = get_device_id(dev);
|
||||
ivrs_alias = amd_iommu_alias_table[devid];
|
||||
pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
|
||||
|
@ -289,6 +340,29 @@ static struct iommu_dev_data *get_dev_data(struct device *dev)
|
|||
return dev->archdata.iommu;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find or create an IOMMU group for a acpihid device.
|
||||
*/
|
||||
static struct iommu_group *acpihid_device_group(struct device *dev)
|
||||
{
|
||||
struct acpihid_map_entry *p, *entry = NULL;
|
||||
int devid;
|
||||
|
||||
devid = get_acpihid_device_id(dev, &entry);
|
||||
if (devid < 0)
|
||||
return ERR_PTR(devid);
|
||||
|
||||
list_for_each_entry(p, &acpihid_map, list) {
|
||||
if ((devid == p->devid) && p->group)
|
||||
entry->group = p->group;
|
||||
}
|
||||
|
||||
if (!entry->group)
|
||||
entry->group = generic_device_group(dev);
|
||||
|
||||
return entry->group;
|
||||
}
|
||||
|
||||
static bool pci_iommuv2_capable(struct pci_dev *pdev)
|
||||
{
|
||||
static const int caps[] = {
|
||||
|
@ -340,9 +414,11 @@ static void init_unity_mappings_for_device(struct device *dev,
|
|||
struct dma_ops_domain *dma_dom)
|
||||
{
|
||||
struct unity_map_entry *e;
|
||||
u16 devid;
|
||||
int devid;
|
||||
|
||||
devid = get_device_id(dev);
|
||||
if (devid < 0)
|
||||
return;
|
||||
|
||||
list_for_each_entry(e, &amd_iommu_unity_map, list) {
|
||||
if (!(devid >= e->devid_start && devid <= e->devid_end))
|
||||
|
@ -357,16 +433,14 @@ static void init_unity_mappings_for_device(struct device *dev,
|
|||
*/
|
||||
static bool check_device(struct device *dev)
|
||||
{
|
||||
u16 devid;
|
||||
int devid;
|
||||
|
||||
if (!dev || !dev->dma_mask)
|
||||
return false;
|
||||
|
||||
/* No PCI device */
|
||||
if (!dev_is_pci(dev))
|
||||
return false;
|
||||
|
||||
devid = get_device_id(dev);
|
||||
if (devid < 0)
|
||||
return false;
|
||||
|
||||
/* Out of our scope? */
|
||||
if (devid > amd_iommu_last_bdf)
|
||||
|
@ -401,22 +475,26 @@ static void init_iommu_group(struct device *dev)
|
|||
|
||||
static int iommu_init_device(struct device *dev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct iommu_dev_data *dev_data;
|
||||
int devid;
|
||||
|
||||
if (dev->archdata.iommu)
|
||||
return 0;
|
||||
|
||||
dev_data = find_dev_data(get_device_id(dev));
|
||||
devid = get_device_id(dev);
|
||||
if (devid < 0)
|
||||
return devid;
|
||||
|
||||
dev_data = find_dev_data(devid);
|
||||
if (!dev_data)
|
||||
return -ENOMEM;
|
||||
|
||||
dev_data->alias = get_alias(dev);
|
||||
|
||||
if (pci_iommuv2_capable(pdev)) {
|
||||
if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
|
||||
struct amd_iommu *iommu;
|
||||
|
||||
iommu = amd_iommu_rlookup_table[dev_data->devid];
|
||||
iommu = amd_iommu_rlookup_table[dev_data->devid];
|
||||
dev_data->iommu_v2 = iommu->is_iommu_v2;
|
||||
}
|
||||
|
||||
|
@ -430,9 +508,13 @@ static int iommu_init_device(struct device *dev)
|
|||
|
||||
static void iommu_ignore_device(struct device *dev)
|
||||
{
|
||||
u16 devid, alias;
|
||||
u16 alias;
|
||||
int devid;
|
||||
|
||||
devid = get_device_id(dev);
|
||||
if (devid < 0)
|
||||
return;
|
||||
|
||||
alias = get_alias(dev);
|
||||
|
||||
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
|
||||
|
@ -444,8 +526,14 @@ static void iommu_ignore_device(struct device *dev)
|
|||
|
||||
static void iommu_uninit_device(struct device *dev)
|
||||
{
|
||||
struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev));
|
||||
int devid;
|
||||
struct iommu_dev_data *dev_data;
|
||||
|
||||
devid = get_device_id(dev);
|
||||
if (devid < 0)
|
||||
return;
|
||||
|
||||
dev_data = search_dev_data(devid);
|
||||
if (!dev_data)
|
||||
return;
|
||||
|
||||
|
@ -466,70 +554,6 @@ static void iommu_uninit_device(struct device *dev)
|
|||
*/
|
||||
}
|
||||
|
||||
#ifdef CONFIG_AMD_IOMMU_STATS
|
||||
|
||||
/*
|
||||
* Initialization code for statistics collection
|
||||
*/
|
||||
|
||||
DECLARE_STATS_COUNTER(compl_wait);
|
||||
DECLARE_STATS_COUNTER(cnt_map_single);
|
||||
DECLARE_STATS_COUNTER(cnt_unmap_single);
|
||||
DECLARE_STATS_COUNTER(cnt_map_sg);
|
||||
DECLARE_STATS_COUNTER(cnt_unmap_sg);
|
||||
DECLARE_STATS_COUNTER(cnt_alloc_coherent);
|
||||
DECLARE_STATS_COUNTER(cnt_free_coherent);
|
||||
DECLARE_STATS_COUNTER(cross_page);
|
||||
DECLARE_STATS_COUNTER(domain_flush_single);
|
||||
DECLARE_STATS_COUNTER(domain_flush_all);
|
||||
DECLARE_STATS_COUNTER(alloced_io_mem);
|
||||
DECLARE_STATS_COUNTER(total_map_requests);
|
||||
DECLARE_STATS_COUNTER(complete_ppr);
|
||||
DECLARE_STATS_COUNTER(invalidate_iotlb);
|
||||
DECLARE_STATS_COUNTER(invalidate_iotlb_all);
|
||||
DECLARE_STATS_COUNTER(pri_requests);
|
||||
|
||||
static struct dentry *stats_dir;
|
||||
static struct dentry *de_fflush;
|
||||
|
||||
static void amd_iommu_stats_add(struct __iommu_counter *cnt)
|
||||
{
|
||||
if (stats_dir == NULL)
|
||||
return;
|
||||
|
||||
cnt->dent = debugfs_create_u64(cnt->name, 0444, stats_dir,
|
||||
&cnt->value);
|
||||
}
|
||||
|
||||
static void amd_iommu_stats_init(void)
|
||||
{
|
||||
stats_dir = debugfs_create_dir("amd-iommu", NULL);
|
||||
if (stats_dir == NULL)
|
||||
return;
|
||||
|
||||
de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
|
||||
&amd_iommu_unmap_flush);
|
||||
|
||||
amd_iommu_stats_add(&compl_wait);
|
||||
amd_iommu_stats_add(&cnt_map_single);
|
||||
amd_iommu_stats_add(&cnt_unmap_single);
|
||||
amd_iommu_stats_add(&cnt_map_sg);
|
||||
amd_iommu_stats_add(&cnt_unmap_sg);
|
||||
amd_iommu_stats_add(&cnt_alloc_coherent);
|
||||
amd_iommu_stats_add(&cnt_free_coherent);
|
||||
amd_iommu_stats_add(&cross_page);
|
||||
amd_iommu_stats_add(&domain_flush_single);
|
||||
amd_iommu_stats_add(&domain_flush_all);
|
||||
amd_iommu_stats_add(&alloced_io_mem);
|
||||
amd_iommu_stats_add(&total_map_requests);
|
||||
amd_iommu_stats_add(&complete_ppr);
|
||||
amd_iommu_stats_add(&invalidate_iotlb);
|
||||
amd_iommu_stats_add(&invalidate_iotlb_all);
|
||||
amd_iommu_stats_add(&pri_requests);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/****************************************************************************
|
||||
*
|
||||
* Interrupt handling functions
|
||||
|
@ -652,8 +676,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
|
|||
{
|
||||
struct amd_iommu_fault fault;
|
||||
|
||||
INC_STATS_COUNTER(pri_requests);
|
||||
|
||||
if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
|
||||
pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
|
||||
return;
|
||||
|
@ -2283,13 +2305,17 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev)
|
|||
static int attach_device(struct device *dev,
|
||||
struct protection_domain *domain)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct pci_dev *pdev;
|
||||
struct iommu_dev_data *dev_data;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
dev_data = get_dev_data(dev);
|
||||
|
||||
if (!dev_is_pci(dev))
|
||||
goto skip_ats_check;
|
||||
|
||||
pdev = to_pci_dev(dev);
|
||||
if (domain->flags & PD_IOMMUV2_MASK) {
|
||||
if (!dev_data->passthrough)
|
||||
return -EINVAL;
|
||||
|
@ -2308,6 +2334,7 @@ static int attach_device(struct device *dev,
|
|||
dev_data->ats.qdep = pci_ats_queue_depth(pdev);
|
||||
}
|
||||
|
||||
skip_ats_check:
|
||||
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
|
||||
ret = __attach_device(dev_data, domain);
|
||||
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
|
||||
|
@ -2364,6 +2391,9 @@ static void detach_device(struct device *dev)
|
|||
__detach_device(dev_data);
|
||||
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
|
||||
|
||||
if (!dev_is_pci(dev))
|
||||
return;
|
||||
|
||||
if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
|
||||
pdev_iommuv2_disable(to_pci_dev(dev));
|
||||
else if (dev_data->ats.enabled)
|
||||
|
@ -2377,13 +2407,15 @@ static int amd_iommu_add_device(struct device *dev)
|
|||
struct iommu_dev_data *dev_data;
|
||||
struct iommu_domain *domain;
|
||||
struct amd_iommu *iommu;
|
||||
u16 devid;
|
||||
int ret;
|
||||
int ret, devid;
|
||||
|
||||
if (!check_device(dev) || get_dev_data(dev))
|
||||
return 0;
|
||||
|
||||
devid = get_device_id(dev);
|
||||
if (devid < 0)
|
||||
return devid;
|
||||
|
||||
iommu = amd_iommu_rlookup_table[devid];
|
||||
|
||||
ret = iommu_init_device(dev);
|
||||
|
@ -2421,18 +2453,29 @@ static int amd_iommu_add_device(struct device *dev)
|
|||
static void amd_iommu_remove_device(struct device *dev)
|
||||
{
|
||||
struct amd_iommu *iommu;
|
||||
u16 devid;
|
||||
int devid;
|
||||
|
||||
if (!check_device(dev))
|
||||
return;
|
||||
|
||||
devid = get_device_id(dev);
|
||||
if (devid < 0)
|
||||
return;
|
||||
|
||||
iommu = amd_iommu_rlookup_table[devid];
|
||||
|
||||
iommu_uninit_device(dev);
|
||||
iommu_completion_wait(iommu);
|
||||
}
|
||||
|
||||
static struct iommu_group *amd_iommu_device_group(struct device *dev)
|
||||
{
|
||||
if (dev_is_pci(dev))
|
||||
return pci_device_group(dev);
|
||||
|
||||
return acpihid_device_group(dev);
|
||||
}
|
||||
|
||||
/*****************************************************************************
|
||||
*
|
||||
* The next functions belong to the dma_ops mapping/unmapping code.
|
||||
|
@ -2597,11 +2640,6 @@ static dma_addr_t __map_single(struct device *dev,
|
|||
pages = iommu_num_pages(paddr, size, PAGE_SIZE);
|
||||
paddr &= PAGE_MASK;
|
||||
|
||||
INC_STATS_COUNTER(total_map_requests);
|
||||
|
||||
if (pages > 1)
|
||||
INC_STATS_COUNTER(cross_page);
|
||||
|
||||
if (align)
|
||||
align_mask = (1UL << get_order(size)) - 1;
|
||||
|
||||
|
@ -2622,8 +2660,6 @@ static dma_addr_t __map_single(struct device *dev,
|
|||
}
|
||||
address += offset;
|
||||
|
||||
ADD_STATS_COUNTER(alloced_io_mem, size);
|
||||
|
||||
if (unlikely(amd_iommu_np_cache)) {
|
||||
domain_flush_pages(&dma_dom->domain, address, size);
|
||||
domain_flush_complete(&dma_dom->domain);
|
||||
|
@ -2671,8 +2707,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
|
|||
start += PAGE_SIZE;
|
||||
}
|
||||
|
||||
SUB_STATS_COUNTER(alloced_io_mem, size);
|
||||
|
||||
dma_ops_free_addresses(dma_dom, dma_addr, pages);
|
||||
}
|
||||
|
||||
|
@ -2688,8 +2722,6 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
|
|||
struct protection_domain *domain;
|
||||
u64 dma_mask;
|
||||
|
||||
INC_STATS_COUNTER(cnt_map_single);
|
||||
|
||||
domain = get_domain(dev);
|
||||
if (PTR_ERR(domain) == -EINVAL)
|
||||
return (dma_addr_t)paddr;
|
||||
|
@ -2710,8 +2742,6 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
|
|||
{
|
||||
struct protection_domain *domain;
|
||||
|
||||
INC_STATS_COUNTER(cnt_unmap_single);
|
||||
|
||||
domain = get_domain(dev);
|
||||
if (IS_ERR(domain))
|
||||
return;
|
||||
|
@ -2734,8 +2764,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
|
|||
int mapped_elems = 0;
|
||||
u64 dma_mask;
|
||||
|
||||
INC_STATS_COUNTER(cnt_map_sg);
|
||||
|
||||
domain = get_domain(dev);
|
||||
if (IS_ERR(domain))
|
||||
return 0;
|
||||
|
@ -2781,8 +2809,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
|
|||
struct scatterlist *s;
|
||||
int i;
|
||||
|
||||
INC_STATS_COUNTER(cnt_unmap_sg);
|
||||
|
||||
domain = get_domain(dev);
|
||||
if (IS_ERR(domain))
|
||||
return;
|
||||
|
@ -2805,8 +2831,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
|
|||
struct protection_domain *domain;
|
||||
struct page *page;
|
||||
|
||||
INC_STATS_COUNTER(cnt_alloc_coherent);
|
||||
|
||||
domain = get_domain(dev);
|
||||
if (PTR_ERR(domain) == -EINVAL) {
|
||||
page = alloc_pages(flag, get_order(size));
|
||||
|
@ -2860,8 +2884,6 @@ static void free_coherent(struct device *dev, size_t size,
|
|||
struct protection_domain *domain;
|
||||
struct page *page;
|
||||
|
||||
INC_STATS_COUNTER(cnt_free_coherent);
|
||||
|
||||
page = virt_to_page(virt_addr);
|
||||
size = PAGE_ALIGN(size);
|
||||
|
||||
|
@ -2926,7 +2948,17 @@ static struct dma_map_ops amd_iommu_dma_ops = {
|
|||
|
||||
int __init amd_iommu_init_api(void)
|
||||
{
|
||||
return bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
|
||||
int err = 0;
|
||||
|
||||
err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops);
|
||||
if (err)
|
||||
return err;
|
||||
#ifdef CONFIG_ARM_AMBA
|
||||
err = bus_set_iommu(&amba_bustype, &amd_iommu_ops);
|
||||
if (err)
|
||||
return err;
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __init amd_iommu_init_dma_ops(void)
|
||||
|
@ -2943,8 +2975,6 @@ int __init amd_iommu_init_dma_ops(void)
|
|||
if (!swiotlb)
|
||||
dma_ops = &nommu_dma_ops;
|
||||
|
||||
amd_iommu_stats_init();
|
||||
|
||||
if (amd_iommu_unmap_flush)
|
||||
pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
|
||||
else
|
||||
|
@ -3098,12 +3128,14 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
|
|||
{
|
||||
struct iommu_dev_data *dev_data = dev->archdata.iommu;
|
||||
struct amd_iommu *iommu;
|
||||
u16 devid;
|
||||
int devid;
|
||||
|
||||
if (!check_device(dev))
|
||||
return;
|
||||
|
||||
devid = get_device_id(dev);
|
||||
if (devid < 0)
|
||||
return;
|
||||
|
||||
if (dev_data->domain != NULL)
|
||||
detach_device(dev);
|
||||
|
@ -3221,9 +3253,11 @@ static void amd_iommu_get_dm_regions(struct device *dev,
|
|||
struct list_head *head)
|
||||
{
|
||||
struct unity_map_entry *entry;
|
||||
u16 devid;
|
||||
int devid;
|
||||
|
||||
devid = get_device_id(dev);
|
||||
if (devid < 0)
|
||||
return;
|
||||
|
||||
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
|
||||
struct iommu_dm_region *region;
|
||||
|
@ -3270,7 +3304,7 @@ static const struct iommu_ops amd_iommu_ops = {
|
|||
.iova_to_phys = amd_iommu_iova_to_phys,
|
||||
.add_device = amd_iommu_add_device,
|
||||
.remove_device = amd_iommu_remove_device,
|
||||
.device_group = pci_device_group,
|
||||
.device_group = amd_iommu_device_group,
|
||||
.get_dm_regions = amd_iommu_get_dm_regions,
|
||||
.put_dm_regions = amd_iommu_put_dm_regions,
|
||||
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
|
||||
|
@ -3431,8 +3465,6 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
|
|||
static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
|
||||
u64 address)
|
||||
{
|
||||
INC_STATS_COUNTER(invalidate_iotlb);
|
||||
|
||||
return __flush_pasid(domain, pasid, address, false);
|
||||
}
|
||||
|
||||
|
@ -3453,8 +3485,6 @@ EXPORT_SYMBOL(amd_iommu_flush_page);
|
|||
|
||||
static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
|
||||
{
|
||||
INC_STATS_COUNTER(invalidate_iotlb_all);
|
||||
|
||||
return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
|
||||
true);
|
||||
}
|
||||
|
@ -3574,8 +3604,6 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
|
|||
struct amd_iommu *iommu;
|
||||
struct iommu_cmd cmd;
|
||||
|
||||
INC_STATS_COUNTER(complete_ppr);
|
||||
|
||||
dev_data = get_dev_data(&pdev->dev);
|
||||
iommu = amd_iommu_rlookup_table[dev_data->devid];
|
||||
|
||||
|
@ -3925,6 +3953,9 @@ static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
|
|||
case X86_IRQ_ALLOC_TYPE_MSI:
|
||||
case X86_IRQ_ALLOC_TYPE_MSIX:
|
||||
devid = get_device_id(&info->msi_dev->dev);
|
||||
if (devid < 0)
|
||||
return NULL;
|
||||
|
||||
iommu = amd_iommu_rlookup_table[devid];
|
||||
if (iommu)
|
||||
return iommu->msi_domain;
|
||||
|
|
|
@ -44,7 +44,7 @@
|
|||
*/
|
||||
#define IVRS_HEADER_LENGTH 48
|
||||
|
||||
#define ACPI_IVHD_TYPE 0x10
|
||||
#define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
|
||||
#define ACPI_IVMD_TYPE_ALL 0x20
|
||||
#define ACPI_IVMD_TYPE 0x21
|
||||
#define ACPI_IVMD_TYPE_RANGE 0x22
|
||||
|
@ -58,6 +58,11 @@
|
|||
#define IVHD_DEV_EXT_SELECT 0x46
|
||||
#define IVHD_DEV_EXT_SELECT_RANGE 0x47
|
||||
#define IVHD_DEV_SPECIAL 0x48
|
||||
#define IVHD_DEV_ACPI_HID 0xf0
|
||||
|
||||
#define UID_NOT_PRESENT 0
|
||||
#define UID_IS_INTEGER 1
|
||||
#define UID_IS_CHARACTER 2
|
||||
|
||||
#define IVHD_SPECIAL_IOAPIC 1
|
||||
#define IVHD_SPECIAL_HPET 2
|
||||
|
@ -99,7 +104,11 @@ struct ivhd_header {
|
|||
u64 mmio_phys;
|
||||
u16 pci_seg;
|
||||
u16 info;
|
||||
u32 efr;
|
||||
u32 efr_attr;
|
||||
|
||||
/* Following only valid on IVHD type 11h and 40h */
|
||||
u64 efr_reg; /* Exact copy of MMIO_EXT_FEATURES */
|
||||
u64 res;
|
||||
} __attribute__((packed));
|
||||
|
||||
/*
|
||||
|
@ -111,6 +120,11 @@ struct ivhd_entry {
|
|||
u16 devid;
|
||||
u8 flags;
|
||||
u32 ext;
|
||||
u32 hidh;
|
||||
u64 cid;
|
||||
u8 uidf;
|
||||
u8 uidl;
|
||||
u8 uid;
|
||||
} __attribute__((packed));
|
||||
|
||||
/*
|
||||
|
@ -133,6 +147,7 @@ bool amd_iommu_irq_remap __read_mostly;
|
|||
|
||||
static bool amd_iommu_detected;
|
||||
static bool __initdata amd_iommu_disabled;
|
||||
static int amd_iommu_target_ivhd_type;
|
||||
|
||||
u16 amd_iommu_last_bdf; /* largest PCI device id we have
|
||||
to handle */
|
||||
|
@ -218,8 +233,12 @@ enum iommu_init_state {
|
|||
#define EARLY_MAP_SIZE 4
|
||||
static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
|
||||
static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
|
||||
static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
|
||||
|
||||
static int __initdata early_ioapic_map_size;
|
||||
static int __initdata early_hpet_map_size;
|
||||
static int __initdata early_acpihid_map_size;
|
||||
|
||||
static bool __initdata cmdline_maps;
|
||||
|
||||
static enum iommu_init_state init_state = IOMMU_START_STATE;
|
||||
|
@ -394,6 +413,22 @@ static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
|
|||
release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
|
||||
}
|
||||
|
||||
static inline u32 get_ivhd_header_size(struct ivhd_header *h)
|
||||
{
|
||||
u32 size = 0;
|
||||
|
||||
switch (h->type) {
|
||||
case 0x10:
|
||||
size = 24;
|
||||
break;
|
||||
case 0x11:
|
||||
case 0x40:
|
||||
size = 40;
|
||||
break;
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
/****************************************************************************
|
||||
*
|
||||
* The functions below belong to the first pass of AMD IOMMU ACPI table
|
||||
|
@ -408,7 +443,15 @@ static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
|
|||
*/
|
||||
static inline int ivhd_entry_length(u8 *ivhd)
|
||||
{
|
||||
return 0x04 << (*ivhd >> 6);
|
||||
u32 type = ((struct ivhd_entry *)ivhd)->type;
|
||||
|
||||
if (type < 0x80) {
|
||||
return 0x04 << (*ivhd >> 6);
|
||||
} else if (type == IVHD_DEV_ACPI_HID) {
|
||||
/* For ACPI_HID, offset 21 is uid len */
|
||||
return *((u8 *)ivhd + 21) + 22;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -420,7 +463,14 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
|
|||
u8 *p = (void *)h, *end = (void *)h;
|
||||
struct ivhd_entry *dev;
|
||||
|
||||
p += sizeof(*h);
|
||||
u32 ivhd_size = get_ivhd_header_size(h);
|
||||
|
||||
if (!ivhd_size) {
|
||||
pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
p += ivhd_size;
|
||||
end += h->length;
|
||||
|
||||
while (p < end) {
|
||||
|
@ -448,6 +498,22 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int __init check_ivrs_checksum(struct acpi_table_header *table)
|
||||
{
|
||||
int i;
|
||||
u8 checksum = 0, *p = (u8 *)table;
|
||||
|
||||
for (i = 0; i < table->length; ++i)
|
||||
checksum += p[i];
|
||||
if (checksum != 0) {
|
||||
/* ACPI table corrupt */
|
||||
pr_err(FW_BUG "AMD-Vi: IVRS invalid checksum\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Iterate over all IVHD entries in the ACPI table and find the highest device
|
||||
* id which we need to handle. This is the first of three functions which parse
|
||||
|
@ -455,31 +521,19 @@ static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
|
|||
*/
|
||||
static int __init find_last_devid_acpi(struct acpi_table_header *table)
|
||||
{
|
||||
int i;
|
||||
u8 checksum = 0, *p = (u8 *)table, *end = (u8 *)table;
|
||||
u8 *p = (u8 *)table, *end = (u8 *)table;
|
||||
struct ivhd_header *h;
|
||||
|
||||
/*
|
||||
* Validate checksum here so we don't need to do it when
|
||||
* we actually parse the table
|
||||
*/
|
||||
for (i = 0; i < table->length; ++i)
|
||||
checksum += p[i];
|
||||
if (checksum != 0)
|
||||
/* ACPI table corrupt */
|
||||
return -ENODEV;
|
||||
|
||||
p += IVRS_HEADER_LENGTH;
|
||||
|
||||
end += table->length;
|
||||
while (p < end) {
|
||||
h = (struct ivhd_header *)p;
|
||||
switch (h->type) {
|
||||
case ACPI_IVHD_TYPE:
|
||||
find_last_devid_from_ivhd(h);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
if (h->type == amd_iommu_target_ivhd_type) {
|
||||
int ret = find_last_devid_from_ivhd(h);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
p += h->length;
|
||||
}
|
||||
|
@ -724,6 +778,42 @@ static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
|
||||
bool cmd_line)
|
||||
{
|
||||
struct acpihid_map_entry *entry;
|
||||
struct list_head *list = &acpihid_map;
|
||||
|
||||
list_for_each_entry(entry, list, list) {
|
||||
if (strcmp(entry->hid, hid) ||
|
||||
(*uid && *entry->uid && strcmp(entry->uid, uid)) ||
|
||||
!entry->cmd_line)
|
||||
continue;
|
||||
|
||||
pr_info("AMD-Vi: Command-line override for hid:%s uid:%s\n",
|
||||
hid, uid);
|
||||
*devid = entry->devid;
|
||||
return 0;
|
||||
}
|
||||
|
||||
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
|
||||
if (!entry)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(entry->uid, uid, strlen(uid));
|
||||
memcpy(entry->hid, hid, strlen(hid));
|
||||
entry->devid = *devid;
|
||||
entry->cmd_line = cmd_line;
|
||||
entry->root_devid = (entry->devid & (~0x7));
|
||||
|
||||
pr_info("AMD-Vi:%s, add hid:%s, uid:%s, rdevid:%d\n",
|
||||
entry->cmd_line ? "cmd" : "ivrs",
|
||||
entry->hid, entry->uid, entry->root_devid);
|
||||
|
||||
list_add_tail(&entry->list, list);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init add_early_maps(void)
|
||||
{
|
||||
int i, ret;
|
||||
|
@ -746,6 +836,15 @@ static int __init add_early_maps(void)
|
|||
return ret;
|
||||
}
|
||||
|
||||
for (i = 0; i < early_acpihid_map_size; ++i) {
|
||||
ret = add_acpi_hid_device(early_acpihid_map[i].hid,
|
||||
early_acpihid_map[i].uid,
|
||||
&early_acpihid_map[i].devid,
|
||||
early_acpihid_map[i].cmd_line);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -785,6 +884,7 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
|
|||
u32 dev_i, ext_flags = 0;
|
||||
bool alias = false;
|
||||
struct ivhd_entry *e;
|
||||
u32 ivhd_size;
|
||||
int ret;
|
||||
|
||||
|
||||
|
@ -800,7 +900,14 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
|
|||
/*
|
||||
* Done. Now parse the device entries
|
||||
*/
|
||||
p += sizeof(struct ivhd_header);
|
||||
ivhd_size = get_ivhd_header_size(h);
|
||||
if (!ivhd_size) {
|
||||
pr_err("AMD-Vi: Unsupported IVHD type %#x\n", h->type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
p += ivhd_size;
|
||||
|
||||
end += h->length;
|
||||
|
||||
|
||||
|
@ -958,6 +1065,70 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
|
|||
|
||||
break;
|
||||
}
|
||||
case IVHD_DEV_ACPI_HID: {
|
||||
u16 devid;
|
||||
u8 hid[ACPIHID_HID_LEN] = {0};
|
||||
u8 uid[ACPIHID_UID_LEN] = {0};
|
||||
int ret;
|
||||
|
||||
if (h->type != 0x40) {
|
||||
pr_err(FW_BUG "Invalid IVHD device type %#x\n",
|
||||
e->type);
|
||||
break;
|
||||
}
|
||||
|
||||
memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
|
||||
hid[ACPIHID_HID_LEN - 1] = '\0';
|
||||
|
||||
if (!(*hid)) {
|
||||
pr_err(FW_BUG "Invalid HID.\n");
|
||||
break;
|
||||
}
|
||||
|
||||
switch (e->uidf) {
|
||||
case UID_NOT_PRESENT:
|
||||
|
||||
if (e->uidl != 0)
|
||||
pr_warn(FW_BUG "Invalid UID length.\n");
|
||||
|
||||
break;
|
||||
case UID_IS_INTEGER:
|
||||
|
||||
sprintf(uid, "%d", e->uid);
|
||||
|
||||
break;
|
||||
case UID_IS_CHARACTER:
|
||||
|
||||
memcpy(uid, (u8 *)(&e->uid), ACPIHID_UID_LEN - 1);
|
||||
uid[ACPIHID_UID_LEN - 1] = '\0';
|
||||
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
|
||||
hid, uid,
|
||||
PCI_BUS_NUM(devid),
|
||||
PCI_SLOT(devid),
|
||||
PCI_FUNC(devid));
|
||||
|
||||
devid = e->devid;
|
||||
flags = e->flags;
|
||||
|
||||
ret = add_acpi_hid_device(hid, uid, &devid, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* add_special_device might update the devid in case a
|
||||
* command-line override is present. So call
|
||||
* set_dev_entry_from_acpi after add_special_device.
|
||||
*/
|
||||
set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
|
||||
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -1078,13 +1249,25 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
|
|||
iommu->pci_seg = h->pci_seg;
|
||||
iommu->mmio_phys = h->mmio_phys;
|
||||
|
||||
/* Check if IVHD EFR contains proper max banks/counters */
|
||||
if ((h->efr != 0) &&
|
||||
((h->efr & (0xF << 13)) != 0) &&
|
||||
((h->efr & (0x3F << 17)) != 0)) {
|
||||
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
|
||||
} else {
|
||||
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
|
||||
switch (h->type) {
|
||||
case 0x10:
|
||||
/* Check if IVHD EFR contains proper max banks/counters */
|
||||
if ((h->efr_attr != 0) &&
|
||||
((h->efr_attr & (0xF << 13)) != 0) &&
|
||||
((h->efr_attr & (0x3F << 17)) != 0))
|
||||
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
|
||||
else
|
||||
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
|
||||
break;
|
||||
case 0x11:
|
||||
case 0x40:
|
||||
if (h->efr_reg & (1 << 9))
|
||||
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
|
||||
else
|
||||
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
|
||||
|
@ -1117,6 +1300,32 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* get_highest_supported_ivhd_type - Look up the appropriate IVHD type
|
||||
* @ivrs Pointer to the IVRS header
|
||||
*
|
||||
* This function search through all IVDB of the maximum supported IVHD
|
||||
*/
|
||||
static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
|
||||
{
|
||||
u8 *base = (u8 *)ivrs;
|
||||
struct ivhd_header *ivhd = (struct ivhd_header *)
|
||||
(base + IVRS_HEADER_LENGTH);
|
||||
u8 last_type = ivhd->type;
|
||||
u16 devid = ivhd->devid;
|
||||
|
||||
while (((u8 *)ivhd - base < ivrs->length) &&
|
||||
(ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
|
||||
u8 *p = (u8 *) ivhd;
|
||||
|
||||
if (ivhd->devid == devid)
|
||||
last_type = ivhd->type;
|
||||
ivhd = (struct ivhd_header *)(p + ivhd->length);
|
||||
}
|
||||
|
||||
return last_type;
|
||||
}
|
||||
|
||||
/*
|
||||
* Iterates over all IOMMU entries in the ACPI table, allocates the
|
||||
* IOMMU structure and initializes it with init_iommu_one()
|
||||
|
@ -1133,8 +1342,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
|
|||
|
||||
while (p < end) {
|
||||
h = (struct ivhd_header *)p;
|
||||
switch (*p) {
|
||||
case ACPI_IVHD_TYPE:
|
||||
if (*p == amd_iommu_target_ivhd_type) {
|
||||
|
||||
DUMP_printk("device: %02x:%02x.%01x cap: %04x "
|
||||
"seg: %d flags: %01x info %04x\n",
|
||||
|
@ -1151,9 +1359,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
|
|||
ret = init_iommu_one(iommu, h);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
p += h->length;
|
||||
|
||||
|
@ -1818,18 +2023,20 @@ static void __init free_dma_resources(void)
|
|||
* remapping setup code.
|
||||
*
|
||||
* This function basically parses the ACPI table for AMD IOMMU (IVRS)
|
||||
* three times:
|
||||
* four times:
|
||||
*
|
||||
* 1 pass) Find the highest PCI device id the driver has to handle.
|
||||
* 1 pass) Discover the most comprehensive IVHD type to use.
|
||||
*
|
||||
* 2 pass) Find the highest PCI device id the driver has to handle.
|
||||
* Upon this information the size of the data structures is
|
||||
* determined that needs to be allocated.
|
||||
*
|
||||
* 2 pass) Initialize the data structures just allocated with the
|
||||
* 3 pass) Initialize the data structures just allocated with the
|
||||
* information in the ACPI table about available AMD IOMMUs
|
||||
* in the system. It also maps the PCI devices in the
|
||||
* system to specific IOMMUs
|
||||
*
|
||||
* 3 pass) After the basic data structures are allocated and
|
||||
* 4 pass) After the basic data structures are allocated and
|
||||
* initialized we update them with information about memory
|
||||
* remapping requirements parsed out of the ACPI table in
|
||||
* this last pass.
|
||||
|
@ -1856,6 +2063,17 @@ static int __init early_amd_iommu_init(void)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Validate checksum here so we don't need to do it when
|
||||
* we actually parse the table
|
||||
*/
|
||||
ret = check_ivrs_checksum(ivrs_base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
|
||||
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
|
||||
|
||||
/*
|
||||
* First parse ACPI tables to find the largest Bus/Dev/Func
|
||||
* we need to handle. Upon this information the shared data
|
||||
|
@ -2259,10 +2477,43 @@ static int __init parse_ivrs_hpet(char *str)
|
|||
return 1;
|
||||
}
|
||||
|
||||
static int __init parse_ivrs_acpihid(char *str)
|
||||
{
|
||||
u32 bus, dev, fn;
|
||||
char *hid, *uid, *p;
|
||||
char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
|
||||
int ret, i;
|
||||
|
||||
ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
|
||||
if (ret != 4) {
|
||||
pr_err("AMD-Vi: Invalid command line: ivrs_acpihid(%s)\n", str);
|
||||
return 1;
|
||||
}
|
||||
|
||||
p = acpiid;
|
||||
hid = strsep(&p, ":");
|
||||
uid = p;
|
||||
|
||||
if (!hid || !(*hid) || !uid) {
|
||||
pr_err("AMD-Vi: Invalid command line: hid or uid\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
i = early_acpihid_map_size++;
|
||||
memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
|
||||
memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
|
||||
early_acpihid_map[i].devid =
|
||||
((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
|
||||
early_acpihid_map[i].cmd_line = true;
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("amd_iommu_dump", parse_amd_iommu_dump);
|
||||
__setup("amd_iommu=", parse_amd_iommu_options);
|
||||
__setup("ivrs_ioapic", parse_ivrs_ioapic);
|
||||
__setup("ivrs_hpet", parse_ivrs_hpet);
|
||||
__setup("ivrs_acpihid", parse_ivrs_acpihid);
|
||||
|
||||
IOMMU_INIT_FINISH(amd_iommu_detect,
|
||||
gart_iommu_hole_init,
|
||||
|
|
|
@ -527,6 +527,19 @@ struct amd_iommu {
|
|||
#endif
|
||||
};
|
||||
|
||||
#define ACPIHID_UID_LEN 256
|
||||
#define ACPIHID_HID_LEN 9
|
||||
|
||||
struct acpihid_map_entry {
|
||||
struct list_head list;
|
||||
u8 uid[ACPIHID_UID_LEN];
|
||||
u8 hid[ACPIHID_HID_LEN];
|
||||
u16 devid;
|
||||
u16 root_devid;
|
||||
bool cmd_line;
|
||||
struct iommu_group *group;
|
||||
};
|
||||
|
||||
struct devid_map {
|
||||
struct list_head list;
|
||||
u8 id;
|
||||
|
@ -537,6 +550,7 @@ struct devid_map {
|
|||
/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
|
||||
extern struct list_head ioapic_map;
|
||||
extern struct list_head hpet_map;
|
||||
extern struct list_head acpihid_map;
|
||||
|
||||
/*
|
||||
* List with all IOMMUs in the system. This list is not locked because it is
|
||||
|
@ -668,30 +682,4 @@ static inline int get_hpet_devid(int id)
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_AMD_IOMMU_STATS
|
||||
|
||||
struct __iommu_counter {
|
||||
char *name;
|
||||
struct dentry *dent;
|
||||
u64 value;
|
||||
};
|
||||
|
||||
#define DECLARE_STATS_COUNTER(nm) \
|
||||
static struct __iommu_counter nm = { \
|
||||
.name = #nm, \
|
||||
}
|
||||
|
||||
#define INC_STATS_COUNTER(name) name.value += 1
|
||||
#define ADD_STATS_COUNTER(name, x) name.value += (x)
|
||||
#define SUB_STATS_COUNTER(name, x) name.value -= (x)
|
||||
|
||||
#else /* CONFIG_AMD_IOMMU_STATS */
|
||||
|
||||
#define DECLARE_STATS_COUNTER(name)
|
||||
#define INC_STATS_COUNTER(name)
|
||||
#define ADD_STATS_COUNTER(name, x)
|
||||
#define SUB_STATS_COUNTER(name, x)
|
||||
|
||||
#endif /* CONFIG_AMD_IOMMU_STATS */
|
||||
|
||||
#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
|
||||
|
|
|
@ -590,6 +590,7 @@ struct arm_smmu_device {
|
|||
|
||||
unsigned long ias; /* IPA */
|
||||
unsigned long oas; /* PA */
|
||||
unsigned long pgsize_bitmap;
|
||||
|
||||
#define ARM_SMMU_MAX_ASIDS (1 << 16)
|
||||
unsigned int asid_bits;
|
||||
|
@ -1516,8 +1517,6 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static struct iommu_ops arm_smmu_ops;
|
||||
|
||||
static int arm_smmu_domain_finalise(struct iommu_domain *domain)
|
||||
{
|
||||
int ret;
|
||||
|
@ -1555,7 +1554,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
|
|||
}
|
||||
|
||||
pgtbl_cfg = (struct io_pgtable_cfg) {
|
||||
.pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
|
||||
.pgsize_bitmap = smmu->pgsize_bitmap,
|
||||
.ias = ias,
|
||||
.oas = oas,
|
||||
.tlb = &arm_smmu_gather_ops,
|
||||
|
@ -1566,7 +1565,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
|
|||
if (!pgtbl_ops)
|
||||
return -ENOMEM;
|
||||
|
||||
arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
|
||||
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
|
||||
smmu_domain->pgtbl_ops = pgtbl_ops;
|
||||
|
||||
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
|
||||
|
@ -2410,7 +2409,6 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
|
|||
{
|
||||
u32 reg;
|
||||
bool coherent;
|
||||
unsigned long pgsize_bitmap = 0;
|
||||
|
||||
/* IDR0 */
|
||||
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
|
||||
|
@ -2541,13 +2539,16 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
|
|||
|
||||
/* Page sizes */
|
||||
if (reg & IDR5_GRAN64K)
|
||||
pgsize_bitmap |= SZ_64K | SZ_512M;
|
||||
smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
|
||||
if (reg & IDR5_GRAN16K)
|
||||
pgsize_bitmap |= SZ_16K | SZ_32M;
|
||||
smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
|
||||
if (reg & IDR5_GRAN4K)
|
||||
pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
|
||||
smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
|
||||
|
||||
arm_smmu_ops.pgsize_bitmap &= pgsize_bitmap;
|
||||
if (arm_smmu_ops.pgsize_bitmap == -1UL)
|
||||
arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
|
||||
else
|
||||
arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
|
||||
|
||||
/* Output address size */
|
||||
switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
|
||||
|
|
|
@ -34,6 +34,7 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/io-64-nonatomic-hi-lo.h>
|
||||
#include <linux/iommu.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/module.h>
|
||||
|
@ -71,16 +72,15 @@
|
|||
((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
|
||||
? 0x400 : 0))
|
||||
|
||||
/*
|
||||
* Some 64-bit registers only make sense to write atomically, but in such
|
||||
* cases all the data relevant to AArch32 formats lies within the lower word,
|
||||
* therefore this actually makes more sense than it might first appear.
|
||||
*/
|
||||
#ifdef CONFIG_64BIT
|
||||
#define smmu_writeq writeq_relaxed
|
||||
#define smmu_write_atomic_lq writeq_relaxed
|
||||
#else
|
||||
#define smmu_writeq(reg64, addr) \
|
||||
do { \
|
||||
u64 __val = (reg64); \
|
||||
void __iomem *__addr = (addr); \
|
||||
writel_relaxed(__val >> 32, __addr + 4); \
|
||||
writel_relaxed(__val, __addr); \
|
||||
} while (0)
|
||||
#define smmu_write_atomic_lq writel_relaxed
|
||||
#endif
|
||||
|
||||
/* Configuration registers */
|
||||
|
@ -94,9 +94,13 @@
|
|||
#define sCR0_VMIDPNE (1 << 11)
|
||||
#define sCR0_PTM (1 << 12)
|
||||
#define sCR0_FB (1 << 13)
|
||||
#define sCR0_VMID16EN (1 << 31)
|
||||
#define sCR0_BSU_SHIFT 14
|
||||
#define sCR0_BSU_MASK 0x3
|
||||
|
||||
/* Auxiliary Configuration register */
|
||||
#define ARM_SMMU_GR0_sACR 0x10
|
||||
|
||||
/* Identification registers */
|
||||
#define ARM_SMMU_GR0_ID0 0x20
|
||||
#define ARM_SMMU_GR0_ID1 0x24
|
||||
|
@ -116,6 +120,8 @@
|
|||
#define ID0_NTS (1 << 28)
|
||||
#define ID0_SMS (1 << 27)
|
||||
#define ID0_ATOSNS (1 << 26)
|
||||
#define ID0_PTFS_NO_AARCH32 (1 << 25)
|
||||
#define ID0_PTFS_NO_AARCH32S (1 << 24)
|
||||
#define ID0_CTTW (1 << 14)
|
||||
#define ID0_NUMIRPT_SHIFT 16
|
||||
#define ID0_NUMIRPT_MASK 0xff
|
||||
|
@ -141,6 +147,10 @@
|
|||
#define ID2_PTFS_4K (1 << 12)
|
||||
#define ID2_PTFS_16K (1 << 13)
|
||||
#define ID2_PTFS_64K (1 << 14)
|
||||
#define ID2_VMID16 (1 << 15)
|
||||
|
||||
#define ID7_MAJOR_SHIFT 4
|
||||
#define ID7_MAJOR_MASK 0xf
|
||||
|
||||
/* Global TLB invalidation */
|
||||
#define ARM_SMMU_GR0_TLBIVMID 0x64
|
||||
|
@ -193,12 +203,15 @@
|
|||
#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
|
||||
#define CBA2R_RW64_32BIT (0 << 0)
|
||||
#define CBA2R_RW64_64BIT (1 << 0)
|
||||
#define CBA2R_VMID_SHIFT 16
|
||||
#define CBA2R_VMID_MASK 0xffff
|
||||
|
||||
/* Translation context bank */
|
||||
#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
|
||||
#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
|
||||
|
||||
#define ARM_SMMU_CB_SCTLR 0x0
|
||||
#define ARM_SMMU_CB_ACTLR 0x4
|
||||
#define ARM_SMMU_CB_RESUME 0x8
|
||||
#define ARM_SMMU_CB_TTBCR2 0x10
|
||||
#define ARM_SMMU_CB_TTBR0 0x20
|
||||
|
@ -206,11 +219,9 @@
|
|||
#define ARM_SMMU_CB_TTBCR 0x30
|
||||
#define ARM_SMMU_CB_S1_MAIR0 0x38
|
||||
#define ARM_SMMU_CB_S1_MAIR1 0x3c
|
||||
#define ARM_SMMU_CB_PAR_LO 0x50
|
||||
#define ARM_SMMU_CB_PAR_HI 0x54
|
||||
#define ARM_SMMU_CB_PAR 0x50
|
||||
#define ARM_SMMU_CB_FSR 0x58
|
||||
#define ARM_SMMU_CB_FAR_LO 0x60
|
||||
#define ARM_SMMU_CB_FAR_HI 0x64
|
||||
#define ARM_SMMU_CB_FAR 0x60
|
||||
#define ARM_SMMU_CB_FSYNR0 0x68
|
||||
#define ARM_SMMU_CB_S1_TLBIVA 0x600
|
||||
#define ARM_SMMU_CB_S1_TLBIASID 0x610
|
||||
|
@ -230,6 +241,10 @@
|
|||
#define SCTLR_M (1 << 0)
|
||||
#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
|
||||
|
||||
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
|
||||
|
||||
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
|
||||
|
||||
#define CB_PAR_F (1 << 0)
|
||||
|
||||
#define ATSR_ACTIVE (1 << 0)
|
||||
|
@ -270,10 +285,17 @@ MODULE_PARM_DESC(disable_bypass,
|
|||
"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
|
||||
|
||||
enum arm_smmu_arch_version {
|
||||
ARM_SMMU_V1 = 1,
|
||||
ARM_SMMU_V1,
|
||||
ARM_SMMU_V1_64K,
|
||||
ARM_SMMU_V2,
|
||||
};
|
||||
|
||||
enum arm_smmu_implementation {
|
||||
GENERIC_SMMU,
|
||||
ARM_MMU500,
|
||||
CAVIUM_SMMUV2,
|
||||
};
|
||||
|
||||
struct arm_smmu_smr {
|
||||
u8 idx;
|
||||
u16 mask;
|
||||
|
@ -305,11 +327,18 @@ struct arm_smmu_device {
|
|||
#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
|
||||
#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
|
||||
#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
|
||||
#define ARM_SMMU_FEAT_VMID16 (1 << 6)
|
||||
#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
|
||||
#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
|
||||
#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
|
||||
#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
|
||||
#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
|
||||
u32 features;
|
||||
|
||||
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
|
||||
u32 options;
|
||||
enum arm_smmu_arch_version version;
|
||||
enum arm_smmu_implementation model;
|
||||
|
||||
u32 num_context_banks;
|
||||
u32 num_s2_context_banks;
|
||||
|
@ -322,6 +351,7 @@ struct arm_smmu_device {
|
|||
unsigned long va_size;
|
||||
unsigned long ipa_size;
|
||||
unsigned long pa_size;
|
||||
unsigned long pgsize_bitmap;
|
||||
|
||||
u32 num_global_irqs;
|
||||
u32 num_context_irqs;
|
||||
|
@ -329,17 +359,27 @@ struct arm_smmu_device {
|
|||
|
||||
struct list_head list;
|
||||
struct rb_root masters;
|
||||
|
||||
u32 cavium_id_base; /* Specific to Cavium */
|
||||
};
|
||||
|
||||
enum arm_smmu_context_fmt {
|
||||
ARM_SMMU_CTX_FMT_NONE,
|
||||
ARM_SMMU_CTX_FMT_AARCH64,
|
||||
ARM_SMMU_CTX_FMT_AARCH32_L,
|
||||
ARM_SMMU_CTX_FMT_AARCH32_S,
|
||||
};
|
||||
|
||||
struct arm_smmu_cfg {
|
||||
u8 cbndx;
|
||||
u8 irptndx;
|
||||
u32 cbar;
|
||||
enum arm_smmu_context_fmt fmt;
|
||||
};
|
||||
#define INVALID_IRPTNDX 0xff
|
||||
|
||||
#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
|
||||
#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
|
||||
#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
|
||||
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
|
||||
|
||||
enum arm_smmu_domain_stage {
|
||||
ARM_SMMU_DOMAIN_S1 = 0,
|
||||
|
@ -357,8 +397,6 @@ struct arm_smmu_domain {
|
|||
struct iommu_domain domain;
|
||||
};
|
||||
|
||||
static struct iommu_ops arm_smmu_ops;
|
||||
|
||||
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
|
||||
static LIST_HEAD(arm_smmu_devices);
|
||||
|
||||
|
@ -367,6 +405,8 @@ struct arm_smmu_option_prop {
|
|||
const char *prop;
|
||||
};
|
||||
|
||||
static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
|
||||
|
||||
static struct arm_smmu_option_prop arm_smmu_options[] = {
|
||||
{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
|
||||
{ 0, NULL},
|
||||
|
@ -578,11 +618,11 @@ static void arm_smmu_tlb_inv_context(void *cookie)
|
|||
|
||||
if (stage1) {
|
||||
base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
|
||||
writel_relaxed(ARM_SMMU_CB_ASID(cfg),
|
||||
writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
|
||||
base + ARM_SMMU_CB_S1_TLBIASID);
|
||||
} else {
|
||||
base = ARM_SMMU_GR0(smmu);
|
||||
writel_relaxed(ARM_SMMU_CB_VMID(cfg),
|
||||
writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
|
||||
base + ARM_SMMU_GR0_TLBIVMID);
|
||||
}
|
||||
|
||||
|
@ -602,37 +642,33 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
|
|||
reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
|
||||
reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
|
||||
if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
|
||||
iova &= ~12UL;
|
||||
iova |= ARM_SMMU_CB_ASID(cfg);
|
||||
iova |= ARM_SMMU_CB_ASID(smmu, cfg);
|
||||
do {
|
||||
writel_relaxed(iova, reg);
|
||||
iova += granule;
|
||||
} while (size -= granule);
|
||||
#ifdef CONFIG_64BIT
|
||||
} else {
|
||||
iova >>= 12;
|
||||
iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
|
||||
iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
|
||||
do {
|
||||
writeq_relaxed(iova, reg);
|
||||
iova += granule >> 12;
|
||||
} while (size -= granule);
|
||||
#endif
|
||||
}
|
||||
#ifdef CONFIG_64BIT
|
||||
} else if (smmu->version == ARM_SMMU_V2) {
|
||||
reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
|
||||
reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
|
||||
ARM_SMMU_CB_S2_TLBIIPAS2;
|
||||
iova >>= 12;
|
||||
do {
|
||||
writeq_relaxed(iova, reg);
|
||||
smmu_write_atomic_lq(iova, reg);
|
||||
iova += granule >> 12;
|
||||
} while (size -= granule);
|
||||
#endif
|
||||
} else {
|
||||
reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
|
||||
writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
|
||||
writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -645,7 +681,7 @@ static struct iommu_gather_ops arm_smmu_gather_ops = {
|
|||
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
|
||||
{
|
||||
int flags, ret;
|
||||
u32 fsr, far, fsynr, resume;
|
||||
u32 fsr, fsynr, resume;
|
||||
unsigned long iova;
|
||||
struct iommu_domain *domain = dev;
|
||||
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
|
||||
|
@ -667,13 +703,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
|
|||
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
|
||||
flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
|
||||
|
||||
far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
|
||||
iova = far;
|
||||
#ifdef CONFIG_64BIT
|
||||
far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
|
||||
iova |= ((unsigned long)far << 32);
|
||||
#endif
|
||||
|
||||
iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
|
||||
if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
|
||||
ret = IRQ_HANDLED;
|
||||
resume = RESUME_RETRY;
|
||||
|
@@ -734,22 +764,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
if (smmu->version > ARM_SMMU_V1) {
/*
* CBA2R.
* *Must* be initialised before CBAR thanks to VMID16
* architectural oversight affected some implementations.
*/
#ifdef CONFIG_64BIT
reg = CBA2R_RW64_64BIT;
#else
reg = CBA2R_RW64_32BIT;
#endif
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
reg = CBA2R_RW64_64BIT;
else
reg = CBA2R_RW64_32BIT;
/* 16-bit VMIDs live in CBA2R */
if (smmu->features & ARM_SMMU_FEAT_VMID16)
reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
}
/* CBAR */
reg = cfg->cbar;
if (smmu->version == ARM_SMMU_V1)
if (smmu->version < ARM_SMMU_V2)
reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
/*
@@ -759,8 +787,9 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
if (stage1) {
reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
} else {
reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
/* 8-bit VMIDs live in CBAR */
reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
}
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
@@ -768,15 +797,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
if (stage1) {
reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
} else {
reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
}
/* TTBCR */
@@ -855,16 +884,40 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
/*
* Choosing a suitable context format is even more fiddly. Until we
* grow some way for the caller to express a preference, and/or move
* the decision into the io-pgtable code where it arguably belongs,
* just aim for the closest thing to the rest of the system, and hope
* that the hardware isn't esoteric enough that we can't assume AArch64
* support to be a superset of AArch32 support...
*/
if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
(smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
ARM_SMMU_FEAT_FMT_AARCH64_16K |
ARM_SMMU_FEAT_FMT_AARCH64_4K)))
cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
ret = -EINVAL;
goto out_unlock;
}
switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1:
cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
start = smmu->num_s2_context_banks;
ias = smmu->va_size;
oas = smmu->ipa_size;
if (IS_ENABLED(CONFIG_64BIT))
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
fmt = ARM_64_LPAE_S1;
else
} else {
fmt = ARM_32_LPAE_S1;
ias = min(ias, 32UL);
oas = min(oas, 40UL);
}
break;
case ARM_SMMU_DOMAIN_NESTED:
/*
@@ -876,10 +929,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
start = 0;
ias = smmu->ipa_size;
oas = smmu->pa_size;
if (IS_ENABLED(CONFIG_64BIT))
if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
fmt = ARM_64_LPAE_S2;
else
} else {
fmt = ARM_32_LPAE_S2;
ias = min(ias, 40UL);
oas = min(oas, 40UL);
}
break;
default:
ret = -EINVAL;
@@ -892,7 +948,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
goto out_unlock;
cfg->cbndx = ret;
if (smmu->version == ARM_SMMU_V1) {
if (smmu->version < ARM_SMMU_V2) {
cfg->irptndx = atomic_inc_return(&smmu->irptndx);
cfg->irptndx %= smmu->num_context_irqs;
} else {
@@ -900,7 +956,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
}
pgtbl_cfg = (struct io_pgtable_cfg) {
.pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
.tlb = &arm_smmu_gather_ops,
@@ -914,8 +970,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
goto out_clear_smmu;
}
/* Update our support page sizes to reflect the page table format */
arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
/* Update the domain's page sizes to reflect the page table format */
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
/* Initialise the context bank with our page table cfg */
arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
@ -1252,8 +1308,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
|
|||
/* ATS1 registers can only be written atomically */
|
||||
va = iova & ~0xfffUL;
|
||||
if (smmu->version == ARM_SMMU_V2)
|
||||
smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
|
||||
else
|
||||
smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
|
||||
else /* Register is only 32-bit in v1 */
|
||||
writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
|
||||
|
||||
if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
|
||||
|
@ -1264,9 +1320,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
|
|||
return ops->iova_to_phys(ops, iova);
|
||||
}
|
||||
|
||||
phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
|
||||
phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
|
||||
|
||||
phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
|
||||
if (phys & CB_PAR_F) {
|
||||
dev_err(dev, "translation fault!\n");
|
||||
dev_err(dev, "PAR = 0x%llx\n", phys);
|
||||
|
@ -1492,7 +1546,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
|
|||
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
|
||||
void __iomem *cb_base;
|
||||
int i = 0;
|
||||
u32 reg;
|
||||
u32 reg, major;
|
||||
|
||||
/* clear global FSR */
|
||||
reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
|
||||
|
@ -1505,11 +1559,33 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
|
|||
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
|
||||
}
|
||||
|
||||
/*
|
||||
* Before clearing ARM_MMU500_ACTLR_CPRE, need to
|
||||
* clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
|
||||
* bit is only present in MMU-500r2 onwards.
|
||||
*/
|
||||
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
|
||||
major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
|
||||
if ((smmu->model == ARM_MMU500) && (major >= 2)) {
|
||||
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
|
||||
reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
|
||||
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
|
||||
}
|
||||
|
||||
/* Make sure all context banks are disabled and clear CB_FSR */
|
||||
for (i = 0; i < smmu->num_context_banks; ++i) {
|
||||
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
|
||||
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
|
||||
writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
|
||||
/*
|
||||
* Disable MMU-500's not-particularly-beneficial next-page
|
||||
* prefetcher for the sake of errata #841119 and #826419.
|
||||
*/
|
||||
if (smmu->model == ARM_MMU500) {
|
||||
reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
|
||||
reg &= ~ARM_MMU500_ACTLR_CPRE;
|
||||
writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
|
||||
}
|
||||
}
|
||||
|
||||
/* Invalidate the TLB, just in case */
|
||||
|
@ -1537,6 +1613,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
|
|||
/* Don't upgrade barriers */
|
||||
reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
|
||||
|
||||
if (smmu->features & ARM_SMMU_FEAT_VMID16)
|
||||
reg |= sCR0_VMID16EN;
|
||||
|
||||
/* Push the button */
|
||||
__arm_smmu_tlb_sync(smmu);
|
||||
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
|
||||
|
@ -1569,7 +1648,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
|
|||
bool cttw_dt, cttw_reg;
|
||||
|
||||
dev_notice(smmu->dev, "probing hardware configuration...\n");
|
||||
dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
|
||||
dev_notice(smmu->dev, "SMMUv%d with:\n",
|
||||
smmu->version == ARM_SMMU_V2 ? 2 : 1);
|
||||
|
||||
/* ID0 */
|
||||
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
|
||||
|
@ -1601,7 +1681,8 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
|
|||
return -ENODEV;
|
||||
}
|
||||
|
||||
if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
|
||||
if ((id & ID0_S1TS) &&
|
||||
((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
|
||||
smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
|
||||
dev_notice(smmu->dev, "\taddress translation ops\n");
|
||||
}
|
||||
|
@ -1657,6 +1738,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
|
|||
ID0_NUMSIDB_MASK;
|
||||
}
|
||||
|
||||
if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
|
||||
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
|
||||
if (!(id & ID0_PTFS_NO_AARCH32S))
|
||||
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
|
||||
}
|
||||
|
||||
/* ID1 */
|
||||
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
|
||||
smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
|
||||
|
@ -1677,6 +1764,17 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
|
|||
}
|
||||
dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
|
||||
smmu->num_context_banks, smmu->num_s2_context_banks);
|
||||
/*
|
||||
* Cavium CN88xx erratum #27704.
|
||||
* Ensure ASID and VMID allocation is unique across all SMMUs in
|
||||
* the system.
|
||||
*/
|
||||
if (smmu->model == CAVIUM_SMMUV2) {
|
||||
smmu->cavium_id_base =
|
||||
atomic_add_return(smmu->num_context_banks,
|
||||
&cavium_smmu_context_count);
|
||||
smmu->cavium_id_base -= smmu->num_context_banks;
|
||||
}
|
||||
|
||||
/* ID2 */
|
||||
id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
|
||||
|
@ -1687,6 +1785,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
|
|||
size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
|
||||
smmu->pa_size = size;
|
||||
|
||||
if (id & ID2_VMID16)
|
||||
smmu->features |= ARM_SMMU_FEAT_VMID16;
|
||||
|
||||
/*
|
||||
* What the page table walker can address actually depends on which
|
||||
* descriptor format is in use, but since a) we don't know that yet,
|
||||
|
@ -1696,26 +1797,39 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
|
|||
dev_warn(smmu->dev,
|
||||
"failed to set DMA mask for table walker\n");
|
||||
|
||||
if (smmu->version == ARM_SMMU_V1) {
|
||||
if (smmu->version < ARM_SMMU_V2) {
|
||||
smmu->va_size = smmu->ipa_size;
|
||||
size = SZ_4K | SZ_2M | SZ_1G;
|
||||
if (smmu->version == ARM_SMMU_V1_64K)
|
||||
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
|
||||
} else {
|
||||
size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
|
||||
smmu->va_size = arm_smmu_id_size_to_bits(size);
|
||||
#ifndef CONFIG_64BIT
|
||||
smmu->va_size = min(32UL, smmu->va_size);
|
||||
#endif
|
||||
size = 0;
|
||||
if (id & ID2_PTFS_4K)
|
||||
size |= SZ_4K | SZ_2M | SZ_1G;
|
||||
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
|
||||
if (id & ID2_PTFS_16K)
|
||||
size |= SZ_16K | SZ_32M;
|
||||
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
|
||||
if (id & ID2_PTFS_64K)
|
||||
size |= SZ_64K | SZ_512M;
|
||||
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
|
||||
}
|
||||
|
||||
arm_smmu_ops.pgsize_bitmap &= size;
|
||||
dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
|
||||
/* Now we've corralled the various formats, what'll it do? */
|
||||
if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
|
||||
smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
|
||||
if (smmu->features &
|
||||
(ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
|
||||
smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
|
||||
if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
|
||||
smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
|
||||
if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
|
||||
smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
|
||||
|
||||
if (arm_smmu_ops.pgsize_bitmap == -1UL)
|
||||
arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
|
||||
else
|
||||
arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
|
||||
dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
|
||||
smmu->pgsize_bitmap);
|
||||
|
||||
|
||||
if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
|
||||
dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
|
||||
|
@@ -1728,12 +1842,27 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return 0;
}
struct arm_smmu_match_data {
enum arm_smmu_arch_version version;
enum arm_smmu_implementation model;
};
#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
static struct arm_smmu_match_data name = { .version = ver, .model = imp }
ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
static const struct of_device_id arm_smmu_of_match[] = {
{ .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
{ .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
{ .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
{ .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
{ .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
|
||||
|
@ -1741,6 +1870,7 @@ MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
|
|||
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct of_device_id *of_id;
|
||||
const struct arm_smmu_match_data *data;
|
||||
struct resource *res;
|
||||
struct arm_smmu_device *smmu;
|
||||
struct device *dev = &pdev->dev;
|
||||
|
@ -1756,7 +1886,9 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
|
|||
smmu->dev = dev;
|
||||
|
||||
of_id = of_match_node(arm_smmu_of_match, dev->of_node);
|
||||
smmu->version = (enum arm_smmu_arch_version)of_id->data;
|
||||
data = of_id->data;
|
||||
smmu->version = data->version;
|
||||
smmu->model = data->model;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
smmu->base = devm_ioremap_resource(dev, res);
|
||||
|
@ -1822,7 +1954,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
|
|||
|
||||
parse_driver_options(smmu);
|
||||
|
||||
if (smmu->version > ARM_SMMU_V1 &&
|
||||
if (smmu->version == ARM_SMMU_V2 &&
|
||||
smmu->num_context_banks != smmu->num_context_irqs) {
|
||||
dev_err(dev,
|
||||
"found only %d context interrupt(s) but %d required\n",
|
||||
|
|
|
@ -94,7 +94,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size
|
|||
return -ENODEV;
|
||||
|
||||
/* Use the smallest supported page size for IOVA granularity */
|
||||
order = __ffs(domain->ops->pgsize_bitmap);
|
||||
order = __ffs(domain->pgsize_bitmap);
|
||||
base_pfn = max_t(unsigned long, 1, base >> order);
|
||||
end_pfn = (base + size - 1) >> order;
|
||||
|
||||
|
@ -190,11 +190,15 @@ static void __iommu_dma_free_pages(struct page **pages, int count)
|
|||
kvfree(pages);
|
||||
}
|
||||
|
||||
static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
|
||||
static struct page **__iommu_dma_alloc_pages(unsigned int count,
|
||||
unsigned long order_mask, gfp_t gfp)
|
||||
{
|
||||
struct page **pages;
|
||||
unsigned int i = 0, array_size = count * sizeof(*pages);
|
||||
unsigned int order = MAX_ORDER;
|
||||
|
||||
order_mask &= (2U << MAX_ORDER) - 1;
|
||||
if (!order_mask)
|
||||
return NULL;
|
||||
|
||||
if (array_size <= PAGE_SIZE)
|
||||
pages = kzalloc(array_size, GFP_KERNEL);
|
||||
|
@ -208,36 +212,38 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
|
|||
|
||||
while (count) {
|
||||
struct page *page = NULL;
|
||||
int j;
|
||||
unsigned int order_size;
|
||||
|
||||
/*
|
||||
* Higher-order allocations are a convenience rather
|
||||
* than a necessity, hence using __GFP_NORETRY until
|
||||
* falling back to single-page allocations.
|
||||
* falling back to minimum-order allocations.
|
||||
*/
|
||||
for (order = min_t(unsigned int, order, __fls(count));
|
||||
order > 0; order--) {
|
||||
page = alloc_pages(gfp | __GFP_NORETRY, order);
|
||||
for (order_mask &= (2U << __fls(count)) - 1;
|
||||
order_mask; order_mask &= ~order_size) {
|
||||
unsigned int order = __fls(order_mask);
|
||||
|
||||
order_size = 1U << order;
|
||||
page = alloc_pages((order_mask - order_size) ?
|
||||
gfp | __GFP_NORETRY : gfp, order);
|
||||
if (!page)
|
||||
continue;
|
||||
if (PageCompound(page)) {
|
||||
if (!split_huge_page(page))
|
||||
break;
|
||||
__free_pages(page, order);
|
||||
} else {
|
||||
if (!order)
|
||||
break;
|
||||
if (!PageCompound(page)) {
|
||||
split_page(page, order);
|
||||
break;
|
||||
} else if (!split_huge_page(page)) {
|
||||
break;
|
||||
}
|
||||
__free_pages(page, order);
|
||||
}
|
||||
if (!page)
|
||||
page = alloc_page(gfp);
|
||||
if (!page) {
|
||||
__iommu_dma_free_pages(pages, i);
|
||||
return NULL;
|
||||
}
|
||||
j = 1 << order;
|
||||
count -= j;
|
||||
while (j--)
|
||||
count -= order_size;
|
||||
while (order_size--)
|
||||
pages[i++] = page++;
|
||||
}
|
||||
return pages;
|
||||
|
@ -267,6 +273,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
|
|||
* attached to an iommu_dma_domain
|
||||
* @size: Size of buffer in bytes
|
||||
* @gfp: Allocation flags
|
||||
* @attrs: DMA attributes for this allocation
|
||||
* @prot: IOMMU mapping flags
|
||||
* @handle: Out argument for allocated DMA handle
|
||||
* @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
|
||||
|
@ -278,8 +285,8 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
|
|||
* Return: Array of struct page pointers describing the buffer,
|
||||
* or NULL on failure.
|
||||
*/
|
||||
struct page **iommu_dma_alloc(struct device *dev, size_t size,
|
||||
gfp_t gfp, int prot, dma_addr_t *handle,
|
||||
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
|
||||
struct dma_attrs *attrs, int prot, dma_addr_t *handle,
|
||||
void (*flush_page)(struct device *, const void *, phys_addr_t))
|
||||
{
|
||||
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
|
||||
|
@ -288,11 +295,22 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size,
|
|||
struct page **pages;
|
||||
struct sg_table sgt;
|
||||
dma_addr_t dma_addr;
|
||||
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
|
||||
|
||||
*handle = DMA_ERROR_CODE;
|
||||
|
||||
pages = __iommu_dma_alloc_pages(count, gfp);
|
||||
min_size = alloc_sizes & -alloc_sizes;
|
||||
if (min_size < PAGE_SIZE) {
|
||||
min_size = PAGE_SIZE;
|
||||
alloc_sizes |= PAGE_SIZE;
|
||||
} else {
|
||||
size = ALIGN(size, min_size);
|
||||
}
|
||||
if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
|
||||
alloc_sizes = min_size;
|
||||
|
||||
count = PAGE_ALIGN(size) >> PAGE_SHIFT;
|
||||
pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
|
||||
if (!pages)
|
||||
return NULL;
|
||||
|
||||
|
@ -389,26 +407,58 @@ void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
|
|||
|
||||
/*
|
||||
* Prepare a successfully-mapped scatterlist to give back to the caller.
|
||||
* Handling IOVA concatenation can come later, if needed
|
||||
*
|
||||
* At this point the segments are already laid out by iommu_dma_map_sg() to
|
||||
* avoid individually crossing any boundaries, so we merely need to check a
|
||||
* segment's start address to avoid concatenating across one.
|
||||
*/
|
||||
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
dma_addr_t dma_addr)
|
||||
{
|
||||
struct scatterlist *s;
|
||||
int i;
|
||||
struct scatterlist *s, *cur = sg;
|
||||
unsigned long seg_mask = dma_get_seg_boundary(dev);
|
||||
unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
|
||||
int i, count = 0;
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
/* Un-swizzling the fields here, hence the naming mismatch */
|
||||
unsigned int s_offset = sg_dma_address(s);
|
||||
/* Restore this segment's original unaligned fields first */
|
||||
unsigned int s_iova_off = sg_dma_address(s);
|
||||
unsigned int s_length = sg_dma_len(s);
|
||||
unsigned int s_dma_len = s->length;
|
||||
unsigned int s_iova_len = s->length;
|
||||
|
||||
s->offset += s_offset;
|
||||
s->offset += s_iova_off;
|
||||
s->length = s_length;
|
||||
sg_dma_address(s) = dma_addr + s_offset;
|
||||
dma_addr += s_dma_len;
|
||||
sg_dma_address(s) = DMA_ERROR_CODE;
|
||||
sg_dma_len(s) = 0;
|
||||
|
||||
/*
|
||||
* Now fill in the real DMA data. If...
|
||||
* - there is a valid output segment to append to
|
||||
* - and this segment starts on an IOVA page boundary
|
||||
* - but doesn't fall at a segment boundary
|
||||
* - and wouldn't make the resulting output segment too long
|
||||
*/
|
||||
if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
|
||||
(cur_len + s_length <= max_len)) {
|
||||
/* ...then concatenate it with the previous one */
|
||||
cur_len += s_length;
|
||||
} else {
|
||||
/* Otherwise start the next output segment */
|
||||
if (i > 0)
|
||||
cur = sg_next(cur);
|
||||
cur_len = s_length;
|
||||
count++;
|
||||
|
||||
sg_dma_address(cur) = dma_addr + s_iova_off;
|
||||
}
|
||||
|
||||
sg_dma_len(cur) = cur_len;
|
||||
dma_addr += s_iova_len;
|
||||
|
||||
if (s_length + s_iova_off < s_iova_len)
|
||||
cur_len = 0;
|
||||
}
|
||||
return i;
|
||||
return count;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -446,34 +496,40 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
|
|||
struct scatterlist *s, *prev = NULL;
|
||||
dma_addr_t dma_addr;
|
||||
size_t iova_len = 0;
|
||||
unsigned long mask = dma_get_seg_boundary(dev);
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Work out how much IOVA space we need, and align the segments to
|
||||
* IOVA granules for the IOMMU driver to handle. With some clever
|
||||
* trickery we can modify the list in-place, but reversibly, by
|
||||
* hiding the original data in the as-yet-unused DMA fields.
|
||||
* stashing the unaligned parts in the as-yet-unused DMA fields.
|
||||
*/
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
size_t s_offset = iova_offset(iovad, s->offset);
|
||||
size_t s_iova_off = iova_offset(iovad, s->offset);
|
||||
size_t s_length = s->length;
|
||||
size_t pad_len = (mask - iova_len + 1) & mask;
|
||||
|
||||
sg_dma_address(s) = s_offset;
|
||||
sg_dma_address(s) = s_iova_off;
|
||||
sg_dma_len(s) = s_length;
|
||||
s->offset -= s_offset;
|
||||
s_length = iova_align(iovad, s_length + s_offset);
|
||||
s->offset -= s_iova_off;
|
||||
s_length = iova_align(iovad, s_length + s_iova_off);
|
||||
s->length = s_length;
|
||||
|
||||
/*
|
||||
* The simple way to avoid the rare case of a segment
|
||||
* crossing the boundary mask is to pad the previous one
|
||||
* to end at a naturally-aligned IOVA for this one's size,
|
||||
* at the cost of potentially over-allocating a little.
|
||||
* Due to the alignment of our single IOVA allocation, we can
|
||||
* depend on these assumptions about the segment boundary mask:
|
||||
* - If mask size >= IOVA size, then the IOVA range cannot
|
||||
* possibly fall across a boundary, so we don't care.
|
||||
* - If mask size < IOVA size, then the IOVA range must start
|
||||
* exactly on a boundary, therefore we can lay things out
|
||||
* based purely on segment lengths without needing to know
|
||||
* the actual addresses beforehand.
|
||||
* - The mask must be a power of 2, so pad_len == 0 if
|
||||
* iova_len == 0, thus we cannot dereference prev the first
|
||||
* time through here (i.e. before it has a meaningful value).
|
||||
*/
|
||||
if (prev) {
|
||||
size_t pad_len = roundup_pow_of_two(s_length);
|
||||
|
||||
pad_len = (pad_len - iova_len) & (pad_len - 1);
|
||||
if (pad_len && pad_len < s_length - 1) {
|
||||
prev->length += pad_len;
|
||||
iova_len += pad_len;
|
||||
}
|
||||
|
|
|
@@ -1579,18 +1579,14 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
reason = dmar_get_fault_reason(fault_reason, &fault_type);
if (fault_type == INTR_REMAP)
pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
"fault index %llx\n"
"INTR-REMAP:[fault reason %02d] %s\n",
(source_id >> 8), PCI_SLOT(source_id & 0xFF),
pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index %llx [fault reason %02d] %s\n",
source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr >> 48,
fault_reason, reason);
else
pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
"fault addr %llx \n"
"DMAR:[fault reason %02d] %s\n",
(type ? "DMA Read" : "DMA Write"),
(source_id >> 8), PCI_SLOT(source_id & 0xFF),
pr_err("[%s] Request device [%02x:%02x.%d] fault addr %llx [fault reason %02d] %s\n",
type ? "DMA Read" : "DMA Write",
source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
return 0;
}
@@ -1602,10 +1598,17 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
int reg, fault_index;
u32 fault_status;
unsigned long flag;
bool ratelimited;
static DEFINE_RATELIMIT_STATE(rs,
DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
/* Disable printing, simply clear the fault when ratelimited */
ratelimited = !__ratelimit(&rs);
raw_spin_lock_irqsave(&iommu->register_lock, flag);
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
if (fault_status)
if (fault_status && !ratelimited)
pr_err("DRHD: handling fault status reg %x\n", fault_status);
/* TBD: ignore advanced fault log currently */
@ -1627,24 +1630,28 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
|
|||
if (!(data & DMA_FRCD_F))
|
||||
break;
|
||||
|
||||
fault_reason = dma_frcd_fault_reason(data);
|
||||
type = dma_frcd_type(data);
|
||||
if (!ratelimited) {
|
||||
fault_reason = dma_frcd_fault_reason(data);
|
||||
type = dma_frcd_type(data);
|
||||
|
||||
data = readl(iommu->reg + reg +
|
||||
fault_index * PRIMARY_FAULT_REG_LEN + 8);
|
||||
source_id = dma_frcd_source_id(data);
|
||||
data = readl(iommu->reg + reg +
|
||||
fault_index * PRIMARY_FAULT_REG_LEN + 8);
|
||||
source_id = dma_frcd_source_id(data);
|
||||
|
||||
guest_addr = dmar_readq(iommu->reg + reg +
|
||||
fault_index * PRIMARY_FAULT_REG_LEN);
|
||||
guest_addr = dma_frcd_page_addr(guest_addr);
|
||||
}
|
||||
|
||||
guest_addr = dmar_readq(iommu->reg + reg +
|
||||
fault_index * PRIMARY_FAULT_REG_LEN);
|
||||
guest_addr = dma_frcd_page_addr(guest_addr);
|
||||
/* clear the fault */
|
||||
writel(DMA_FRCD_F, iommu->reg + reg +
|
||||
fault_index * PRIMARY_FAULT_REG_LEN + 12);
|
||||
|
||||
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
|
||||
|
||||
dmar_fault_do_one(iommu, type, fault_reason,
|
||||
source_id, guest_addr);
|
||||
if (!ratelimited)
|
||||
dmar_fault_do_one(iommu, type, fault_reason,
|
||||
source_id, guest_addr);
|
||||
|
||||
fault_index++;
|
||||
if (fault_index >= cap_num_fault_regs(iommu->cap))
|
||||
|
|
|
@ -1143,7 +1143,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
|
|||
} while (!first_pte_in_page(++pte) && pfn <= last_pfn);
|
||||
}
|
||||
|
||||
/* free page table pages. last level pte should already be cleared */
|
||||
/* clear last level (leaf) ptes and free page table pages. */
|
||||
static void dma_pte_free_pagetable(struct dmar_domain *domain,
|
||||
unsigned long start_pfn,
|
||||
unsigned long last_pfn)
|
||||
|
|
|
@ -121,6 +121,8 @@
|
|||
#define ARM_V7S_TEX_MASK 0x7
|
||||
#define ARM_V7S_ATTR_TEX(val) (((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)
|
||||
|
||||
#define ARM_V7S_ATTR_MTK_4GB BIT(9) /* MTK extend it for 4GB mode */
|
||||
|
||||
/* *well, except for TEX on level 2 large pages, of course :( */
|
||||
#define ARM_V7S_CONT_PAGE_TEX_SHIFT 6
|
||||
#define ARM_V7S_CONT_PAGE_TEX_MASK (ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)
|
||||
|
@ -258,9 +260,10 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
|
|||
struct io_pgtable_cfg *cfg)
|
||||
{
|
||||
bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
|
||||
arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S |
|
||||
ARM_V7S_ATTR_TEX(1);
|
||||
arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S;
|
||||
|
||||
if (!(prot & IOMMU_MMIO))
|
||||
pte |= ARM_V7S_ATTR_TEX(1);
|
||||
if (ap) {
|
||||
pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
|
||||
if (!(prot & IOMMU_WRITE))
|
||||
|
@ -270,7 +273,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
|
|||
|
||||
if ((prot & IOMMU_NOEXEC) && ap)
|
||||
pte |= ARM_V7S_ATTR_XN(lvl);
|
||||
if (prot & IOMMU_CACHE)
|
||||
if (prot & IOMMU_MMIO)
|
||||
pte |= ARM_V7S_ATTR_B;
|
||||
else if (prot & IOMMU_CACHE)
|
||||
pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;
|
||||
|
||||
return pte;
|
||||
|
@ -279,10 +284,13 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
|
|||
static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
|
||||
{
|
||||
int prot = IOMMU_READ;
|
||||
arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);
|
||||
|
||||
if (pte & (ARM_V7S_PTE_AP_RDONLY << ARM_V7S_ATTR_SHIFT(lvl)))
|
||||
if (attr & ARM_V7S_PTE_AP_RDONLY)
|
||||
prot |= IOMMU_WRITE;
|
||||
if (pte & ARM_V7S_ATTR_C)
|
||||
if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
|
||||
prot |= IOMMU_MMIO;
|
||||
else if (pte & ARM_V7S_ATTR_C)
|
||||
prot |= IOMMU_CACHE;
|
||||
|
||||
return prot;
|
||||
|
@ -364,6 +372,9 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
|
|||
if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
|
||||
pte |= ARM_V7S_ATTR_NS_SECTION;
|
||||
|
||||
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB)
|
||||
pte |= ARM_V7S_ATTR_MTK_4GB;
|
||||
|
||||
if (num_entries > 1)
|
||||
pte = arm_v7s_pte_to_cont(pte, lvl);
|
||||
|
||||
|
@ -625,9 +636,15 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
|
|||
|
||||
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
|
||||
IO_PGTABLE_QUIRK_NO_PERMS |
|
||||
IO_PGTABLE_QUIRK_TLBI_ON_MAP))
|
||||
IO_PGTABLE_QUIRK_TLBI_ON_MAP |
|
||||
IO_PGTABLE_QUIRK_ARM_MTK_4GB))
|
||||
return NULL;
|
||||
|
||||
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
|
||||
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB &&
|
||||
!(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
|
||||
return NULL;
|
||||
|
||||
data = kmalloc(sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
return NULL;
|
||||
|
|
|
@ -355,7 +355,10 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
|
|||
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
|
||||
pte |= ARM_LPAE_PTE_AP_RDONLY;
|
||||
|
||||
if (prot & IOMMU_CACHE)
|
||||
if (prot & IOMMU_MMIO)
|
||||
pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
|
||||
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
|
||||
else if (prot & IOMMU_CACHE)
|
||||
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
|
||||
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
|
||||
} else {
|
||||
|
@ -364,7 +367,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
|
|||
pte |= ARM_LPAE_PTE_HAP_READ;
|
||||
if (prot & IOMMU_WRITE)
|
||||
pte |= ARM_LPAE_PTE_HAP_WRITE;
|
||||
if (prot & IOMMU_CACHE)
|
||||
if (prot & IOMMU_MMIO)
|
||||
pte |= ARM_LPAE_PTE_MEMATTR_DEV;
|
||||
else if (prot & IOMMU_CACHE)
|
||||
pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
|
||||
else
|
||||
pte |= ARM_LPAE_PTE_MEMATTR_NC;
|
||||
|
|
|
@ -25,8 +25,7 @@
|
|||
#include "io-pgtable.h"
|
||||
|
||||
static const struct io_pgtable_init_fns *
|
||||
io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
|
||||
{
|
||||
io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
|
||||
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
|
||||
[ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
|
||||
[ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
|
||||
|
|
|
@ -60,10 +60,16 @@ struct io_pgtable_cfg {
|
|||
* IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
|
||||
* (unmapped) entries but the hardware might do so anyway, perform
|
||||
* TLB maintenance when mapping as well as when unmapping.
|
||||
*
|
||||
* IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
|
||||
* PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
|
||||
* when the SoC is in "4GB mode" and they can only access the high
|
||||
* remap of DRAM (0x1_00000000 to 0x1_ffffffff).
|
||||
*/
|
||||
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
|
||||
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
|
||||
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
|
||||
#define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
|
||||
unsigned long quirks;
|
||||
unsigned long pgsize_bitmap;
|
||||
unsigned int ias;
|
||||
|
|
|
@ -337,9 +337,9 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
|
|||
if (!domain || domain->type != IOMMU_DOMAIN_DMA)
|
||||
return 0;
|
||||
|
||||
BUG_ON(!domain->ops->pgsize_bitmap);
|
||||
BUG_ON(!domain->pgsize_bitmap);
|
||||
|
||||
pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap);
|
||||
pg_size = 1UL << __ffs(domain->pgsize_bitmap);
|
||||
INIT_LIST_HEAD(&mappings);
|
||||
|
||||
iommu_get_dm_regions(dev, &mappings);
|
||||
|
@ -1069,6 +1069,8 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
|
|||
|
||||
domain->ops = bus->iommu_ops;
|
||||
domain->type = type;
|
||||
/* Assume all sizes by default; the driver may override this later */
|
||||
domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
|
||||
|
||||
return domain;
|
||||
}
|
||||
|
@ -1293,7 +1295,7 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
|
|||
pgsize = (1UL << (pgsize_idx + 1)) - 1;
|
||||
|
||||
/* throw away page sizes not supported by the hardware */
|
||||
pgsize &= domain->ops->pgsize_bitmap;
|
||||
pgsize &= domain->pgsize_bitmap;
|
||||
|
||||
/* make sure we're still sane */
|
||||
BUG_ON(!pgsize);
|
||||
|
@ -1315,14 +1317,14 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
|
|||
int ret = 0;
|
||||
|
||||
if (unlikely(domain->ops->map == NULL ||
|
||||
domain->ops->pgsize_bitmap == 0UL))
|
||||
domain->pgsize_bitmap == 0UL))
|
||||
return -ENODEV;
|
||||
|
||||
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
|
||||
return -EINVAL;
|
||||
|
||||
/* find out the minimum page size supported */
|
||||
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
|
||||
min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
|
||||
|
||||
/*
|
||||
* both the virtual address and the physical one, as well as
|
||||
|
@ -1369,14 +1371,14 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
|
|||
unsigned long orig_iova = iova;
|
||||
|
||||
if (unlikely(domain->ops->unmap == NULL ||
|
||||
domain->ops->pgsize_bitmap == 0UL))
|
||||
domain->pgsize_bitmap == 0UL))
|
||||
return -ENODEV;
|
||||
|
||||
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
|
||||
return -EINVAL;
|
||||
|
||||
/* find out the minimum page size supported */
|
||||
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
|
||||
min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
|
||||
|
||||
/*
|
||||
* The virtual address, as well as the size of the mapping, must be
|
||||
|
@ -1422,10 +1424,10 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
|
|||
unsigned int i, min_pagesz;
|
||||
int ret;
|
||||
|
||||
if (unlikely(domain->ops->pgsize_bitmap == 0UL))
|
||||
if (unlikely(domain->pgsize_bitmap == 0UL))
|
||||
return 0;
|
||||
|
||||
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
|
||||
min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
|
||||
|
@ -1506,7 +1508,7 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
|
|||
break;
|
||||
case DOMAIN_ATTR_PAGING:
|
||||
paging = data;
|
||||
*paging = (domain->ops->pgsize_bitmap != 0UL);
|
||||
*paging = (domain->pgsize_bitmap != 0UL);
|
||||
break;
|
||||
case DOMAIN_ATTR_WINDOWS:
|
||||
count = data;
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/bug.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/component.h>
|
||||
|
@ -56,7 +57,7 @@
|
|||
#define F_MMU_TF_PROTECT_SEL(prot) (((prot) & 0x3) << 5)
|
||||
|
||||
#define REG_MMU_IVRP_PADDR 0x114
|
||||
#define F_MMU_IVRP_PA_SET(pa) ((pa) >> 1)
|
||||
#define F_MMU_IVRP_PA_SET(pa, ext) (((pa) >> 1) | ((!!(ext)) << 31))
|
||||
|
||||
#define REG_MMU_INT_CONTROL0 0x120
|
||||
#define F_L2_MULIT_HIT_EN BIT(0)
|
||||
|
@ -125,6 +126,7 @@ struct mtk_iommu_data {
|
|||
struct mtk_iommu_domain *m4u_dom;
|
||||
struct iommu_group *m4u_group;
|
||||
struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
|
||||
bool enable_4GB;
|
||||
};
|
||||
|
||||
static struct iommu_ops mtk_iommu_ops;
|
||||
|
@ -257,6 +259,9 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
|
|||
.iommu_dev = data->dev,
|
||||
};
|
||||
|
||||
if (data->enable_4GB)
|
||||
dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;
|
||||
|
||||
dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
|
||||
if (!dom->iop) {
|
||||
dev_err(data->dev, "Failed to alloc io pgtable\n");
|
||||
|
@ -264,7 +269,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
|
|||
}
|
||||
|
||||
/* Update our support page sizes bitmap */
|
||||
mtk_iommu_ops.pgsize_bitmap = dom->cfg.pgsize_bitmap;
|
||||
dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
|
||||
|
||||
writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
|
||||
data->base + REG_MMU_PT_BASE_ADDR);
|
||||
|
@ -530,7 +535,7 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
|
|||
F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
|
||||
writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);
|
||||
|
||||
writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
|
||||
writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
|
||||
data->base + REG_MMU_IVRP_PADDR);
|
||||
|
||||
writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
|
||||
|
@ -591,6 +596,9 @@ static int mtk_iommu_probe(struct platform_device *pdev)
|
|||
return -ENOMEM;
|
||||
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
|
||||
|
||||
/* Whether the current dram is over 4GB */
|
||||
data->enable_4GB = !!(max_pfn > (0xffffffffUL >> PAGE_SHIFT));
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
data->base = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(data->base))
|
||||
|
@ -690,7 +698,7 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev)
|
|||
writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
|
||||
writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
|
||||
writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
|
||||
writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base),
|
||||
writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
|
||||
base + REG_MMU_IVRP_PADDR);
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -98,12 +98,12 @@ EXPORT_SYMBOL_GPL(of_get_dma_window);
|
|||
struct of_iommu_node {
|
||||
struct list_head list;
|
||||
struct device_node *np;
|
||||
struct iommu_ops *ops;
|
||||
const struct iommu_ops *ops;
|
||||
};
|
||||
static LIST_HEAD(of_iommu_list);
|
||||
static DEFINE_SPINLOCK(of_iommu_lock);
|
||||
|
||||
void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
|
||||
void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops)
|
||||
{
|
||||
struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
|
||||
|
||||
|
@ -119,10 +119,10 @@ void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops)
|
|||
spin_unlock(&of_iommu_lock);
|
||||
}
|
||||
|
||||
struct iommu_ops *of_iommu_get_ops(struct device_node *np)
|
||||
const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
|
||||
{
|
||||
struct of_iommu_node *node;
|
||||
struct iommu_ops *ops = NULL;
|
||||
const struct iommu_ops *ops = NULL;
|
||||
|
||||
spin_lock(&of_iommu_lock);
|
||||
list_for_each_entry(node, &of_iommu_list, list)
|
||||
|
@ -134,12 +134,12 @@ struct iommu_ops *of_iommu_get_ops(struct device_node *np)
|
|||
return ops;
|
||||
}
|
||||
|
||||
struct iommu_ops *of_iommu_configure(struct device *dev,
|
||||
struct device_node *master_np)
|
||||
const struct iommu_ops *of_iommu_configure(struct device *dev,
|
||||
struct device_node *master_np)
|
||||
{
|
||||
struct of_phandle_args iommu_spec;
|
||||
struct device_node *np;
|
||||
struct iommu_ops *ops = NULL;
|
||||
const struct iommu_ops *ops = NULL;
|
||||
int idx = 0;
|
||||
|
||||
/*
|
||||
|
|
|
@ -136,7 +136,7 @@ static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
|
|||
struct seq_file *s)
|
||||
{
|
||||
seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
|
||||
(cr->cam & MMU_CAM_P) ? 1 : 0);
|
||||
(cr->cam & MMU_CAM_P) ? 1 : 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -628,10 +628,12 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
|
|||
break;
|
||||
default:
|
||||
fn = NULL;
|
||||
BUG();
|
||||
break;
|
||||
}
|
||||
|
||||
if (WARN_ON(!fn))
|
||||
return -EINVAL;
|
||||
|
||||
prot = get_iopte_attr(e);
|
||||
|
||||
spin_lock(&obj->page_table_lock);
|
||||
|
@ -987,7 +989,6 @@ static int omap_iommu_remove(struct platform_device *pdev)
|
|||
{
|
||||
struct omap_iommu *obj = platform_get_drvdata(pdev);
|
||||
|
||||
iopgtable_clear_entry_all(obj);
|
||||
omap_iommu_debugfs_remove(obj);
|
||||
|
||||
pm_runtime_disable(obj->dev);
|
||||
|
@ -1161,7 +1162,8 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
|
|||
* should never fail, but please keep this around to ensure
|
||||
* we keep the hardware happy
|
||||
*/
|
||||
BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));
|
||||
if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
|
||||
goto fail_align;
|
||||
|
||||
clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
|
||||
spin_lock_init(&omap_domain->lock);
|
||||
|
@ -1172,6 +1174,8 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
|
|||
|
||||
return &omap_domain->domain;
|
||||
|
||||
fail_align:
|
||||
kfree(omap_domain->pgtable);
|
||||
fail_nomem:
|
||||
kfree(omap_domain);
|
||||
out:
|
||||
|
|
|
@ -1049,6 +1049,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
|
|||
|
||||
for (i = 0; i < pdev->num_resources; i++) {
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
|
||||
if (!res)
|
||||
continue;
|
||||
iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(iommu->bases[i]))
|
||||
continue;
|
||||
|
|
|
@ -88,7 +88,7 @@ void of_dma_configure(struct device *dev, struct device_node *np)
|
|||
int ret;
|
||||
bool coherent;
|
||||
unsigned long offset;
|
||||
struct iommu_ops *iommu;
|
||||
const struct iommu_ops *iommu;
|
||||
|
||||
/*
|
||||
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
|
||||
|
|
|
@ -407,7 +407,7 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
|
|||
|
||||
mutex_lock(&iommu->lock);
|
||||
list_for_each_entry(domain, &iommu->domain_list, next)
|
||||
bitmap &= domain->domain->ops->pgsize_bitmap;
|
||||
bitmap &= domain->domain->pgsize_bitmap;
|
||||
mutex_unlock(&iommu->lock);
|
||||
|
||||
/*
|
||||
|
|
|
@ -191,7 +191,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
|
|||
#define readl_relaxed readl
|
||||
#endif
|
||||
|
||||
#ifndef readq_relaxed
|
||||
#if defined(readq) && !defined(readq_relaxed)
|
||||
#define readq_relaxed readq
|
||||
#endif
|
||||
|
||||
|
@ -207,7 +207,7 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
|
|||
#define writel_relaxed writel
|
||||
#endif
|
||||
|
||||
#ifndef writeq_relaxed
|
||||
#if defined(writeq) && !defined(writeq_relaxed)
|
||||
#define writeq_relaxed writeq
|
||||
#endif
|
||||
|
||||
|
|
|
@ -38,8 +38,8 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
|
|||
* These implement the bulk of the relevant DMA mapping callbacks, but require
|
||||
* the arch code to take care of attributes and cache maintenance
|
||||
*/
|
||||
struct page **iommu_dma_alloc(struct device *dev, size_t size,
|
||||
gfp_t gfp, int prot, dma_addr_t *handle,
|
||||
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
|
||||
struct dma_attrs *attrs, int prot, dma_addr_t *handle,
|
||||
void (*flush_page)(struct device *, const void *, phys_addr_t));
|
||||
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
|
||||
dma_addr_t *handle);
|
||||
|
|
|
@ -514,7 +514,7 @@ extern u64 dma_get_required_mask(struct device *dev);
|
|||
|
||||
#ifndef arch_setup_dma_ops
|
||||
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
|
||||
u64 size, struct iommu_ops *iommu,
|
||||
u64 size, const struct iommu_ops *iommu,
|
||||
bool coherent) { }
|
||||
#endif
|
||||
|
||||
|
|
|
@ -21,6 +21,23 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
|
|||
writel(val, addr);
|
||||
}
|
||||
|
||||
static inline __u64 hi_lo_readq_relaxed(const volatile void __iomem *addr)
|
||||
{
|
||||
const volatile u32 __iomem *p = addr;
|
||||
u32 low, high;
|
||||
|
||||
high = readl_relaxed(p + 1);
|
||||
low = readl_relaxed(p);
|
||||
|
||||
return low + ((u64)high << 32);
|
||||
}
|
||||
|
||||
static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr)
|
||||
{
|
||||
writel_relaxed(val >> 32, addr + 4);
|
||||
writel_relaxed(val, addr);
|
||||
}
|
||||
|
||||
#ifndef readq
|
||||
#define readq hi_lo_readq
|
||||
#endif
|
||||
|
@ -29,4 +46,12 @@ static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
|
|||
#define writeq hi_lo_writeq
|
||||
#endif
|
||||
|
||||
#ifndef readq_relaxed
|
||||
#define readq_relaxed hi_lo_readq_relaxed
|
||||
#endif
|
||||
|
||||
#ifndef writeq_relaxed
|
||||
#define writeq_relaxed hi_lo_writeq_relaxed
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */
|
||||
|
|
|
@ -21,6 +21,23 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
|
|||
writel(val >> 32, addr + 4);
|
||||
}
|
||||
|
||||
static inline __u64 lo_hi_readq_relaxed(const volatile void __iomem *addr)
|
||||
{
|
||||
const volatile u32 __iomem *p = addr;
|
||||
u32 low, high;
|
||||
|
||||
low = readl_relaxed(p);
|
||||
high = readl_relaxed(p + 1);
|
||||
|
||||
return low + ((u64)high << 32);
|
||||
}
|
||||
|
||||
static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr)
|
||||
{
|
||||
writel_relaxed(val, addr);
|
||||
writel_relaxed(val >> 32, addr + 4);
|
||||
}
|
||||
|
||||
#ifndef readq
|
||||
#define readq lo_hi_readq
|
||||
#endif
|
||||
|
@ -29,4 +46,12 @@ static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
|
|||
#define writeq lo_hi_writeq
|
||||
#endif
|
||||
|
||||
#ifndef readq_relaxed
|
||||
#define readq_relaxed lo_hi_readq_relaxed
|
||||
#endif
|
||||
|
||||
#ifndef writeq_relaxed
|
||||
#define writeq_relaxed lo_hi_writeq_relaxed
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#define IOMMU_WRITE (1 << 1)
|
||||
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
|
||||
#define IOMMU_NOEXEC (1 << 3)
|
||||
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
|
||||
|
||||
struct iommu_ops;
|
||||
struct iommu_group;
|
||||
|
@ -78,6 +79,7 @@ struct iommu_domain_geometry {
|
|||
struct iommu_domain {
|
||||
unsigned type;
|
||||
const struct iommu_ops *ops;
|
||||
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
|
||||
iommu_fault_handler_t handler;
|
||||
void *handler_token;
|
||||
struct iommu_domain_geometry geometry;
|
||||
|
@ -155,8 +157,7 @@ struct iommu_dm_region {
|
|||
* @domain_set_windows: Set the number of windows for a domain
|
||||
* @domain_get_windows: Return the number of windows for a domain
|
||||
* @of_xlate: add OF master IDs to iommu grouping
|
||||
* @pgsize_bitmap: bitmap of supported page sizes
|
||||
* @priv: per-instance data private to the iommu driver
|
||||
* @pgsize_bitmap: bitmap of all possible supported page sizes
|
||||
*/
|
||||
struct iommu_ops {
|
||||
bool (*capable)(enum iommu_cap);
|
||||
|
@ -198,7 +199,6 @@ struct iommu_ops {
|
|||
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
|
||||
|
||||
unsigned long pgsize_bitmap;
|
||||
void *priv;
|
||||
};
|
||||
|
||||
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
|
||||
|
|
|
@ -12,7 +12,7 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix,
|
|||
size_t *size);
|
||||
|
||||
extern void of_iommu_init(void);
|
||||
extern struct iommu_ops *of_iommu_configure(struct device *dev,
|
||||
extern const struct iommu_ops *of_iommu_configure(struct device *dev,
|
||||
struct device_node *master_np);
|
||||
|
||||
#else
|
||||
|
@ -25,7 +25,7 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
|
|||
}
|
||||
|
||||
static inline void of_iommu_init(void) { }
|
||||
static inline struct iommu_ops *of_iommu_configure(struct device *dev,
|
||||
static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
|
||||
struct device_node *master_np)
|
||||
{
|
||||
return NULL;
|
||||
|
@ -33,8 +33,8 @@ static inline struct iommu_ops *of_iommu_configure(struct device *dev,
|
|||
|
||||
#endif /* CONFIG_OF_IOMMU */
|
||||
|
||||
void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
|
||||
struct iommu_ops *of_iommu_get_ops(struct device_node *np);
|
||||
void of_iommu_set_ops(struct device_node *np, const struct iommu_ops *ops);
|
||||
const struct iommu_ops *of_iommu_get_ops(struct device_node *np);
|
||||
|
||||
extern struct of_device_id __iommu_of_table;
|
||||
|
||||
|
|