IOMMU Updates for Linux v3.17
This time with:

 - Support for the generic PCI device alias code in x86 IOMMU drivers
 - A new sysfs interface for IOMMUs
 - Preparations for hotplug support in the Intel IOMMU driver
 - Change the AMD IOMMUv2 driver to not hold references to core data
   structures like mm_struct or task_struct; it relies on mmu_notifiers
   instead
 - Removal of the OMAP IOVMM interface; all of its users have been
   converted to the DMA-API
 - Make struct iommu_ops const everywhere
 - Initial PCI support for the ARM SMMU driver
 - A generic device tree binding is now documented for ARM IOMMUs
 - Various fixes and cleanups all over the place

Also included are some changes to the OMAP code, which are acked by the
maintainer.

Merge tag 'iommu-updates-v3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel.

* tag 'iommu-updates-v3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (67 commits)
  devicetree: Add generic IOMMU device tree bindings
  iommu/vt-d: Fix race setting IRQ CPU affinity while freeing IRQ
  iommu/amd: Fix 2 typos in comments
  iommu/amd: Fix device_state reference counting
  iommu/amd: Remove change_pte mmu_notifier call-back
  iommu/amd: Don't set pasid_state->mm to NULL in unbind_pasid
  iommu/exynos: Select ARM_DMA_USE_IOMMU
  iommu/vt-d: Exclude devices using RMRRs from IOMMU API domains
  iommu/omap: Remove platform data da_start and da_end fields
  ARM: omap: Don't set iommu pdata da_start and da_end fields
  iommu/omap: Remove virtual memory manager
  iommu/vt-d: Fix issue in computing domain's iommu_snooping flag
  iommu/vt-d: Introduce helper function iova_size() to improve code readability
  iommu/vt-d: Introduce helper domain_pfn_within_range() to simplify code
  iommu/vt-d: Simplify intel_unmap_sg() and kill duplicated code
  iommu/vt-d: Change iommu_enable/disable_translation to return void
  iommu/vt-d: Simplify include/linux/dmar.h
  iommu/vt-d: Avoid freeing virtual machine domain in free_dmar_iommu()
  iommu/vt-d: Fix possible invalid memory access caused by free_dmar_iommu()
  iommu/vt-d: Allocate dynamic domain id for virtual domains only
  ...
This commit is contained in:
commit dc7aafba6b
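The sysfs interface added in this pull is a small driver-facing API; its real users appear in the AMD and VT-d hunks below. As a rough driver-side sketch, assuming a hypothetical driver with a per-unit struct my_iommu and a probed struct device to link (only the iommu_device_* calls come from the diff, the rest is illustrative):

	#include <linux/err.h>
	#include <linux/iommu.h>

	/* Hypothetical per-unit state; iommu_dev mirrors the drivers below */
	struct my_iommu {
		struct device *iommu_dev;	/* sysfs representation */
		int index;
	};

	static int my_iommu_register(struct my_iommu *mi, struct device *parent)
	{
		/* Creates /sys/class/iommu/myiommu<N>; drvdata backs the attributes */
		mi->iommu_dev = iommu_device_create(parent, mi, NULL,
						    "myiommu%d", mi->index);
		return PTR_ERR_OR_ZERO(mi->iommu_dev);
	}

	static void my_iommu_add_device(struct my_iommu *mi, struct device *dev)
	{
		/* Links dev under .../devices/ and adds an "iommu" back-link */
		iommu_device_link(mi->iommu_dev, dev);
	}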
@@ -0,0 +1,17 @@
What:		/sys/class/iommu/<iommu>/devices/
Date:		June 2014
KernelVersion:	3.17
Contact:	Alex Williamson <alex.williamson@redhat.com>
Description:
		IOMMU drivers are able to link devices managed by a
		given IOMMU here to allow association of IOMMU to
		device.

What:		/sys/devices/.../iommu
Date:		June 2014
KernelVersion:	3.17
Contact:	Alex Williamson <alex.williamson@redhat.com>
Description:
		IOMMU drivers are able to link the IOMMU for a
		given device here to allow association of device to
		IOMMU.
@@ -0,0 +1,14 @@
What:		/sys/class/iommu/<iommu>/amd-iommu/cap
Date:		June 2014
KernelVersion:	3.17
Contact:	Alex Williamson <alex.williamson@redhat.com>
Description:
		IOMMU capability header as documented in the AMD IOMMU
		specification.  Format: %x

What:		/sys/class/iommu/<iommu>/amd-iommu/features
Date:		June 2014
KernelVersion:	3.17
Contact:	Alex Williamson <alex.williamson@redhat.com>
Description:
		Extended features of the IOMMU.  Format: %llx
@@ -0,0 +1,32 @@
What:		/sys/class/iommu/<iommu>/intel-iommu/address
Date:		June 2014
KernelVersion:	3.17
Contact:	Alex Williamson <alex.williamson@redhat.com>
Description:
		Physical address of the VT-d DRHD for this IOMMU.
		Format: %llx.  This allows association of a sysfs
		intel-iommu with a DMAR DRHD table entry.

What:		/sys/class/iommu/<iommu>/intel-iommu/cap
Date:		June 2014
KernelVersion:	3.17
Contact:	Alex Williamson <alex.williamson@redhat.com>
Description:
		The cached hardware capability register value
		of this DRHD unit.  Format: %llx.

What:		/sys/class/iommu/<iommu>/intel-iommu/ecap
Date:		June 2014
KernelVersion:	3.17
Contact:	Alex Williamson <alex.williamson@redhat.com>
Description:
		The cached hardware extended capability register
		value of this DRHD unit.  Format: %llx.

What:		/sys/class/iommu/<iommu>/intel-iommu/version
Date:		June 2014
KernelVersion:	3.17
Contact:	Alex Williamson <alex.williamson@redhat.com>
Description:
		The architecture version as reported from the
		VT-d VER_REG.  Format: %d:%d, major:minor
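Taken together, and using the "ivhd%d" naming visible in the AMD hunk further below, the layout on a hypothetical one-IOMMU system might look as follows; the PCI address 0000:00:02.0 and the exact link targets are illustrative only:

	/sys/class/iommu/ivhd0/
		amd-iommu/
			cap
			features
		devices/
			0000:00:02.0 -> (link to the managed device)

	/sys/devices/pci0000:00/0000:00:02.0/iommu -> (link back to ivhd0)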
@@ -42,12 +42,6 @@ conditions.

 ** System MMU optional properties:

-- smmu-parent   : When multiple SMMUs are chained together, this
-                  property can be used to provide a phandle to the
-                  parent SMMU (that is the next SMMU on the path going
-                  from the mmu-masters towards memory) node for this
-                  SMMU.
-
 - calxeda,smmu-secure-config-access : Enable proper handling of buggy
                   implementations that always use secure access to
                   SMMU configuration registers. In this case non-secure
@@ -0,0 +1,182 @@
This document describes the generic device tree binding for IOMMUs and their
master(s).


IOMMU device node:
==================

An IOMMU can provide the following services:

* Remap address space to allow devices to access physical memory ranges that
  they otherwise wouldn't be capable of accessing.

  Example: 32-bit DMA to 64-bit physical addresses

* Implement scatter-gather at page level granularity so that the device does
  not have to.

* Provide system protection against "rogue" DMA by forcing all accesses to go
  through the IOMMU and faulting when encountering accesses to unmapped
  address regions.

* Provide address space isolation between multiple contexts.

  Example: Virtualization

Device nodes compatible with this binding represent hardware with some of the
above capabilities.

IOMMUs can be single-master or multiple-master. Single-master IOMMU devices
typically have a fixed association to the master device, whereas multiple-
master IOMMU devices can translate accesses from more than one master.

The device tree node of the IOMMU device's parent bus must contain a valid
"dma-ranges" property that describes how the physical address space of the
IOMMU maps to memory. An empty "dma-ranges" property means that there is a
1:1 mapping from IOMMU to memory.

Required properties:
--------------------
- #iommu-cells: The number of cells in an IOMMU specifier needed to encode an
  address.

The meaning of the IOMMU specifier is defined by the device tree binding of
the specific IOMMU. Below are a few examples of typical use-cases:

- #iommu-cells = <0>: Single master IOMMU devices are not configurable and
  therefore no additional information needs to be encoded in the specifier.
  This may also apply to multiple master IOMMU devices that do not allow the
  association of masters to be configured. Note that an IOMMU can by design
  be multi-master yet only expose a single master in a given configuration.
  In such cases the number of cells will usually be 1 as in the next case.
- #iommu-cells = <1>: Multiple master IOMMU devices may need to be configured
  in order to enable translation for a given master. In such cases the single
  address cell corresponds to the master device's ID. In some cases more than
  one cell can be required to represent a single master ID.
- #iommu-cells = <4>: Some IOMMU devices allow the DMA window for masters to
  be configured. The first cell of the address in this may contain the master
  device's ID for example, while the second cell could contain the start of
  the DMA window for the given device. The length of the DMA window is given
  by the third and fourth cells.

Note that these are merely examples and real-world use-cases may use different
definitions to represent their individual needs. Always refer to the specific
IOMMU binding for the exact meaning of the cells that make up the specifier.


IOMMU master node:
==================

Devices that access memory through an IOMMU are called masters. A device can
have multiple master interfaces (to one or more IOMMU devices).

Required properties:
--------------------
- iommus: A list of phandle and IOMMU specifier pairs that describe the IOMMU
  master interfaces of the device. One entry in the list describes one master
  interface of the device.

When an "iommus" property is specified in a device tree node, the IOMMU will
be used for address translation. If a "dma-ranges" property exists in the
device's parent node it will be ignored. An exception to this rule is if the
referenced IOMMU is disabled, in which case the "dma-ranges" property of the
parent shall take effect. Note that merely disabling a device tree node does
not guarantee that the IOMMU is really disabled since the hardware may not
have a means to turn off translation. But it is invalid in such cases to
disable the IOMMU's device tree node in the first place because it would
prevent any driver from properly setting up the translations.


Notes:
======

One possible extension to the above is to use an "iommus" property along with
a "dma-ranges" property in a bus device node (such as PCI host bridges). This
can be useful to describe how children on the bus relate to the IOMMU if they
are not explicitly listed in the device tree (e.g. PCI devices). However, the
requirements of that use-case haven't been fully determined yet. Implementing
this is therefore not recommended without further discussion and extension of
this binding.


Examples:
=========

Single-master IOMMU:
--------------------

	iommu {
		#iommu-cells = <0>;
	};

	master {
		iommus = <&{/iommu}>;
	};

Multiple-master IOMMU with fixed associations:
----------------------------------------------

	/* multiple-master IOMMU */
	iommu {
		/*
		 * Masters are statically associated with this IOMMU and share
		 * the same address translations because the IOMMU does not
		 * have sufficient information to distinguish between masters.
		 *
		 * Consequently address translation is always on or off for
		 * all masters at any given point in time.
		 */
		#iommu-cells = <0>;
	};

	/* static association with IOMMU */
	master@1 {
		reg = <1>;
		iommus = <&{/iommu}>;
	};

	/* static association with IOMMU */
	master@2 {
		reg = <2>;
		iommus = <&{/iommu}>;
	};

Multiple-master IOMMU:
----------------------

	iommu {
		/* the specifier represents the ID of the master */
		#iommu-cells = <1>;
	};

	master@1 {
		/* device has master ID 42 in the IOMMU */
		iommus = <&{/iommu} 42>;
	};

	master@2 {
		/* device has master IDs 23 and 24 in the IOMMU */
		iommus = <&{/iommu} 23>, <&{/iommu} 24>;
	};

Multiple-master IOMMU with configurable DMA window:
---------------------------------------------------

	/ {
		iommu {
			/*
			 * One cell for the master ID and one cell for the
			 * address of the DMA window. The length of the DMA
			 * window is encoded in two cells.
			 *
			 * The DMA window is the range addressable by the
			 * master (i.e. the I/O virtual address space).
			 */
			#iommu-cells = <4>;
		};

		master {
			/* master ID 42, 4 GiB DMA window starting at 0 */
			iommus = <&{/iommu} 42 0 0x1 0x0>;
		};
	};
@@ -34,8 +34,6 @@ static int __init omap_iommu_dev_init(struct omap_hwmod *oh, void *unused)

 	pdata->name = oh->name;
 	pdata->nr_tlb_entries = a->nr_tlb_entries;
-	pdata->da_start = a->da_start;
-	pdata->da_end = a->da_end;

 	if (oh->rst_lines_cnt == 1) {
 		pdata->reset_name = oh->rst_lines->name;
@@ -2986,8 +2986,6 @@ static struct omap_hwmod_class omap3xxx_mmu_hwmod_class = {
 /* mmu isp */

 static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
-	.da_start	= 0x0,
-	.da_end		= 0xfffff000,
 	.nr_tlb_entries = 8,
 };

@@ -3026,8 +3024,6 @@ static struct omap_hwmod omap3xxx_mmu_isp_hwmod = {
 /* mmu iva */

 static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
-	.da_start	= 0x11000000,
-	.da_end		= 0xfffff000,
 	.nr_tlb_entries = 32,
 };
@@ -2084,8 +2084,6 @@ static struct omap_hwmod_class omap44xx_mmu_hwmod_class = {
 /* mmu ipu */

 static struct omap_mmu_dev_attr mmu_ipu_dev_attr = {
-	.da_start	= 0x0,
-	.da_end		= 0xfffff000,
 	.nr_tlb_entries = 32,
 };

@@ -2133,8 +2131,6 @@ static struct omap_hwmod omap44xx_mmu_ipu_hwmod = {
 /* mmu dsp */

 static struct omap_mmu_dev_attr mmu_dsp_dev_attr = {
-	.da_start	= 0x0,
-	.da_end		= 0xfffff000,
 	.nr_tlb_entries = 32,
 };
@@ -76,7 +76,7 @@ config AMD_IOMMU_STATS

 config AMD_IOMMU_V2
 	tristate "AMD IOMMU Version 2 driver"
-	depends on AMD_IOMMU && PROFILING
+	depends on AMD_IOMMU
 	select MMU_NOTIFIER
 	---help---
 	  This option enables support for the AMD IOMMUv2 features of the IOMMU
@@ -143,16 +143,12 @@ config OMAP_IOMMU
 	depends on ARCH_OMAP2PLUS
 	select IOMMU_API

-config OMAP_IOVMM
-	tristate "OMAP IO Virtual Memory Manager Support"
-	depends on OMAP_IOMMU
-
 config OMAP_IOMMU_DEBUG
-	tristate "Export OMAP IOMMU/IOVMM internals in DebugFS"
-	depends on OMAP_IOVMM && DEBUG_FS
+	tristate "Export OMAP IOMMU internals in DebugFS"
+	depends on OMAP_IOMMU && DEBUG_FS
 	help
 	  Select this to see extensive information about
-	  the internal state of OMAP IOMMU/IOVMM in debugfs.
+	  the internal state of OMAP IOMMU in debugfs.

 	  Say N unless you know you need this.
@@ -180,6 +176,7 @@ config EXYNOS_IOMMU
 	bool "Exynos IOMMU Support"
 	depends on ARCH_EXYNOS
 	select IOMMU_API
+	select ARM_DMA_USE_IOMMU
 	help
 	  Support for the IOMMU (System MMU) of Samsung Exynos application
 	  processor family. This enables H/W multimedia accelerators to see
@@ -1,5 +1,6 @@
 obj-$(CONFIG_IOMMU_API) += iommu.o
 obj-$(CONFIG_IOMMU_API) += iommu-traces.o
+obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
 obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
@@ -11,7 +12,6 @@ obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
-obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
@@ -46,7 +46,6 @@
 #include "amd_iommu_proto.h"
 #include "amd_iommu_types.h"
 #include "irq_remapping.h"
-#include "pci.h"

 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
@@ -81,7 +80,7 @@ LIST_HEAD(hpet_map);
 */
 static struct protection_domain *pt_domain;

-static struct iommu_ops amd_iommu_ops;
+static const struct iommu_ops amd_iommu_ops;

 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;
@@ -133,9 +132,6 @@ static void free_dev_data(struct iommu_dev_data *dev_data)
 	list_del(&dev_data->dev_data_list);
 	spin_unlock_irqrestore(&dev_data_list_lock, flags);

-	if (dev_data->group)
-		iommu_group_put(dev_data->group);
-
 	kfree(dev_data);
 }
@@ -264,167 +260,79 @@ static bool check_device(struct device *dev)
 	return true;
 }

-static struct pci_bus *find_hosted_bus(struct pci_bus *bus)
-{
-	while (!bus->self) {
-		if (!pci_is_root_bus(bus))
-			bus = bus->parent;
-		else
-			return ERR_PTR(-ENODEV);
-	}
-
-	return bus;
-}
-
-#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
-
-static struct pci_dev *get_isolation_root(struct pci_dev *pdev)
-{
-	struct pci_dev *dma_pdev = pdev;
-
-	/* Account for quirked devices */
-	swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
-
-	/*
-	 * If it's a multifunction device that does not support our
-	 * required ACS flags, add to the same group as lowest numbered
-	 * function that also does not suport the required ACS flags.
-	 */
-	if (dma_pdev->multifunction &&
-	    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
-		u8 i, slot = PCI_SLOT(dma_pdev->devfn);
-
-		for (i = 0; i < 8; i++) {
-			struct pci_dev *tmp;
-
-			tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
-			if (!tmp)
-				continue;
-
-			if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
-				swap_pci_ref(&dma_pdev, tmp);
-				break;
-			}
-			pci_dev_put(tmp);
-		}
-	}
-
-	/*
-	 * Devices on the root bus go through the iommu.  If that's not us,
-	 * find the next upstream device and test ACS up to the root bus.
-	 * Finding the next device may require skipping virtual buses.
-	 */
-	while (!pci_is_root_bus(dma_pdev->bus)) {
-		struct pci_bus *bus = find_hosted_bus(dma_pdev->bus);
-		if (IS_ERR(bus))
-			break;
-
-		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
-			break;
-
-		swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
-	}
-
-	return dma_pdev;
-}
-
-static int use_pdev_iommu_group(struct pci_dev *pdev, struct device *dev)
-{
-	struct iommu_group *group = iommu_group_get(&pdev->dev);
-	int ret;
-
-	if (!group) {
-		group = iommu_group_alloc();
-		if (IS_ERR(group))
-			return PTR_ERR(group);
-
-		WARN_ON(&pdev->dev != dev);
-	}
-
-	ret = iommu_group_add_device(group, dev);
-	iommu_group_put(group);
-	return ret;
-}
-
-static int use_dev_data_iommu_group(struct iommu_dev_data *dev_data,
-				    struct device *dev)
-{
-	if (!dev_data->group) {
-		struct iommu_group *group = iommu_group_alloc();
-		if (IS_ERR(group))
-			return PTR_ERR(group);
-
-		dev_data->group = group;
-	}
-
-	return iommu_group_add_device(dev_data->group, dev);
-}
-
 static int init_iommu_group(struct device *dev)
 {
-	struct iommu_dev_data *dev_data;
 	struct iommu_group *group;
-	struct pci_dev *dma_pdev;
-	int ret;

-	group = iommu_group_get(dev);
-	if (group) {
-		iommu_group_put(group);
-		return 0;
-	}
+	group = iommu_group_get_for_dev(dev);

-	dev_data = find_dev_data(get_device_id(dev));
-	if (!dev_data)
-		return -ENOMEM;
+	if (IS_ERR(group))
+		return PTR_ERR(group);

-	if (dev_data->alias_data) {
-		u16 alias;
-		struct pci_bus *bus;
+	iommu_group_put(group);
+	return 0;
+}

-		if (dev_data->alias_data->group)
-			goto use_group;
+static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
+{
+	*(u16 *)data = alias;
+	return 0;
+}

-		/*
-		 * If the alias device exists, it's effectively just a first
-		 * level quirk for finding the DMA source.
-		 */
-		alias = amd_iommu_alias_table[dev_data->devid];
-		dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
-		if (dma_pdev) {
-			dma_pdev = get_isolation_root(dma_pdev);
-			goto use_pdev;
+static u16 get_alias(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	u16 devid, ivrs_alias, pci_alias;
+
+	devid = get_device_id(dev);
+	ivrs_alias = amd_iommu_alias_table[devid];
+	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
+
+	if (ivrs_alias == pci_alias)
+		return ivrs_alias;
+
+	/*
+	 * DMA alias showdown
+	 *
+	 * The IVRS is fairly reliable in telling us about aliases, but it
+	 * can't know about every screwy device.  If we don't have an IVRS
+	 * reported alias, use the PCI reported alias.  In that case we may
+	 * still need to initialize the rlookup and dev_table entries if the
+	 * alias is to a non-existent device.
+	 */
+	if (ivrs_alias == devid) {
+		if (!amd_iommu_rlookup_table[pci_alias]) {
+			amd_iommu_rlookup_table[pci_alias] =
+				amd_iommu_rlookup_table[devid];
+			memcpy(amd_iommu_dev_table[pci_alias].data,
+			       amd_iommu_dev_table[devid].data,
+			       sizeof(amd_iommu_dev_table[pci_alias].data));
 		}

-		/*
-		 * If the alias is virtual, try to find a parent device
-		 * and test whether the IOMMU group is actualy rooted above
-		 * the alias.  Be careful to also test the parent device if
-		 * we think the alias is the root of the group.
-		 */
-		bus = pci_find_bus(0, alias >> 8);
-		if (!bus)
-			goto use_group;
-
-		bus = find_hosted_bus(bus);
-		if (IS_ERR(bus) || !bus->self)
-			goto use_group;
-
-		dma_pdev = get_isolation_root(pci_dev_get(bus->self));
-		if (dma_pdev != bus->self || (dma_pdev->multifunction &&
-		    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)))
-			goto use_pdev;
-
-		pci_dev_put(dma_pdev);
-		goto use_group;
+		return pci_alias;
 	}

-	dma_pdev = get_isolation_root(pci_dev_get(to_pci_dev(dev)));
-use_pdev:
-	ret = use_pdev_iommu_group(dma_pdev, dev);
-	pci_dev_put(dma_pdev);
-	return ret;
-use_group:
-	return use_dev_data_iommu_group(dev_data->alias_data, dev);
+	pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+		"for device %s[%04x:%04x], kernel reported alias "
+		"%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+		PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+		PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+		PCI_FUNC(pci_alias));
+
+	/*
+	 * If we don't have a PCI DMA alias and the IVRS alias is on the same
+	 * bus, then the IVRS table may know about a quirk that we don't.
+	 */
+	if (pci_alias == devid &&
+	    PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+		pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
+		pdev->dma_alias_devfn = ivrs_alias & 0xff;
+		pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+			PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
+			dev_name(dev));
+	}
+
+	return ivrs_alias;
 }

 static int iommu_init_device(struct device *dev)
@@ -441,7 +349,8 @@ static int iommu_init_device(struct device *dev)
 	if (!dev_data)
 		return -ENOMEM;

-	alias = amd_iommu_alias_table[dev_data->devid];
+	alias = get_alias(dev);
+
 	if (alias != dev_data->devid) {
 		struct iommu_dev_data *alias_data;
@@ -470,6 +379,9 @@ static int iommu_init_device(struct device *dev)

 	dev->archdata.iommu = dev_data;

+	iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
+			  dev);
+
 	return 0;
 }
@@ -489,12 +401,22 @@ static void iommu_ignore_device(struct device *dev)

 static void iommu_uninit_device(struct device *dev)
 {
+	struct iommu_dev_data *dev_data = search_dev_data(get_device_id(dev));
+
+	if (!dev_data)
+		return;
+
+	iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
+			    dev);
+
 	iommu_group_remove_device(dev);

+	/* Unlink from alias, it may change if another device is re-plugged */
+	dev_data->alias_data = NULL;
+
 	/*
-	 * Nothing to do here - we keep dev_data around for unplugged devices
-	 * and reuse it when the device is re-plugged - not doing so would
-	 * introduce a ton of races.
+	 * We keep dev_data around for unplugged devices and reuse it when the
+	 * device is re-plugged - not doing so would introduce a ton of races.
 	 */
 }
@@ -3473,7 +3395,7 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
 	return 0;
 }

-static struct iommu_ops amd_iommu_ops = {
+static const struct iommu_ops amd_iommu_ops = {
 	.domain_init = amd_iommu_domain_init,
 	.domain_destroy = amd_iommu_domain_destroy,
 	.attach_dev = amd_iommu_attach_device,
@@ -26,6 +26,7 @@
 #include <linux/msi.h>
 #include <linux/amd-iommu.h>
 #include <linux/export.h>
+#include <linux/iommu.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -1197,6 +1198,39 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
 	iommu->max_counters = (u8) ((val >> 7) & 0xf);
 }

+static ssize_t amd_iommu_show_cap(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct amd_iommu *iommu = dev_get_drvdata(dev);
+	return sprintf(buf, "%x\n", iommu->cap);
+}
+static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
+
+static ssize_t amd_iommu_show_features(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct amd_iommu *iommu = dev_get_drvdata(dev);
+	return sprintf(buf, "%llx\n", iommu->features);
+}
+static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
+
+static struct attribute *amd_iommu_attrs[] = {
+	&dev_attr_cap.attr,
+	&dev_attr_features.attr,
+	NULL,
+};
+
+static struct attribute_group amd_iommu_group = {
+	.name = "amd-iommu",
+	.attrs = amd_iommu_attrs,
+};
+
+static const struct attribute_group *amd_iommu_groups[] = {
+	&amd_iommu_group,
+	NULL,
+};
+
 static int iommu_init_pci(struct amd_iommu *iommu)
 {
@@ -1297,6 +1331,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)

 	amd_iommu_erratum_746_workaround(iommu);

+	iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
+					       amd_iommu_groups, "ivhd%d",
+					       iommu->index);
+
 	return pci_enable_device(iommu->dev);
 }
@@ -390,12 +390,6 @@ struct amd_iommu_fault {

 };

-#define PPR_FAULT_EXEC	(1 << 1)
-#define PPR_FAULT_READ  (1 << 2)
-#define PPR_FAULT_WRITE (1 << 5)
-#define PPR_FAULT_USER  (1 << 6)
-#define PPR_FAULT_RSVD  (1 << 7)
-#define PPR_FAULT_GN    (1 << 8)

 struct iommu_domain;
@@ -432,7 +426,6 @@ struct iommu_dev_data {
 	struct iommu_dev_data *alias_data;/* The alias dev_data */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	atomic_t bind;			  /* Domain attach reference count */
-	struct iommu_group *group;	  /* IOMMU group for virtual aliases */
 	u16 devid;			  /* PCI Device ID */
 	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
 	bool passthrough;		  /* Default for device is pt_domain */
@@ -578,6 +571,9 @@ struct amd_iommu {
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;

+	/* IOMMU sysfs device */
+	struct device *iommu_dev;
+
 	/*
 	 * We can't rely on the BIOS to restore all values on reinit, so we
 	 * need to stash them
@@ -47,12 +47,13 @@ struct pasid_state {
 	atomic_t count;				/* Reference count */
 	unsigned mmu_notifier_count;		/* Counting nested mmu_notifier
						   calls */
-	struct task_struct *task;		/* Task bound to this PASID */
 	struct mm_struct *mm;			/* mm_struct for the faults */
-	struct mmu_notifier mn;			/* mmu_otifier handle */
+	struct mmu_notifier mn;			/* mmu_notifier handle */
 	struct pri_queue pri[PRI_QUEUE_SIZE];	/* PRI tag states */
 	struct device_state *device_state;	/* Link to our device_state */
 	int pasid;				/* PASID index */
+	bool invalid;				/* Used during setup and
+						   teardown of the pasid */
 	spinlock_t lock;			/* Protect pri_queues and
						   mmu_notifer_count */
 	wait_queue_head_t wq;			/* To wait for count == 0 */
@@ -99,7 +100,6 @@ static struct workqueue_struct *iommu_wq;
 static u64 *empty_page_table;

 static void free_pasid_states(struct device_state *dev_state);
-static void unbind_pasid(struct device_state *dev_state, int pasid);

 static u16 device_id(struct pci_dev *pdev)
 {
@@ -297,37 +297,29 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state)
 		schedule();

 	finish_wait(&pasid_state->wq, &wait);
-	mmput(pasid_state->mm);
 	free_pasid_state(pasid_state);
 }

-static void __unbind_pasid(struct pasid_state *pasid_state)
+static void unbind_pasid(struct pasid_state *pasid_state)
 {
 	struct iommu_domain *domain;

 	domain = pasid_state->device_state->domain;

+	/*
+	 * Mark pasid_state as invalid, no more faults will we added to the
+	 * work queue after this is visible everywhere.
+	 */
+	pasid_state->invalid = true;
+
+	/* Make sure this is visible */
+	smp_wmb();
+
+	/* After this the device/pasid can't access the mm anymore */
 	amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
 	clear_pasid_state(pasid_state->device_state, pasid_state->pasid);

 	/* Make sure no more pending faults are in the queue */
 	flush_workqueue(iommu_wq);
-
-	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
-
-	put_pasid_state(pasid_state); /* Reference taken in bind() function */
-}
-
-static void unbind_pasid(struct device_state *dev_state, int pasid)
-{
-	struct pasid_state *pasid_state;
-
-	pasid_state = get_pasid_state(dev_state, pasid);
-	if (pasid_state == NULL)
-		return;
-
-	__unbind_pasid(pasid_state);
-	put_pasid_state_wait(pasid_state); /* Reference taken in this function */
 }

 static void free_pasid_states_level1(struct pasid_state **tbl)
@@ -373,6 +365,12 @@ static void free_pasid_states(struct device_state *dev_state)
 		 * unbind the PASID
 		 */
 		mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+		put_pasid_state_wait(pasid_state); /* Reference taken in
+						      amd_iommu_bind_pasid */
+
+		/* Drop reference taken in amd_iommu_bind_pasid */
+		put_device_state(dev_state);
 	}

 	if (dev_state->pasid_levels == 2)
@@ -411,14 +409,6 @@ static int mn_clear_flush_young(struct mmu_notifier *mn,
 	return 0;
 }

-static void mn_change_pte(struct mmu_notifier *mn,
-			  struct mm_struct *mm,
-			  unsigned long address,
-			  pte_t pte)
-{
-	__mn_flush_page(mn, address);
-}
-
 static void mn_invalidate_page(struct mmu_notifier *mn,
 			       struct mm_struct *mm,
 			       unsigned long address)
@@ -472,22 +462,23 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
 	struct pasid_state *pasid_state;
 	struct device_state *dev_state;
+	bool run_inv_ctx_cb;

 	might_sleep();

-	pasid_state = mn_to_state(mn);
-	dev_state = pasid_state->device_state;
+	pasid_state    = mn_to_state(mn);
+	dev_state      = pasid_state->device_state;
+	run_inv_ctx_cb = !pasid_state->invalid;

-	if (pasid_state->device_state->inv_ctx_cb)
+	if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb)
 		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);

-	unbind_pasid(dev_state, pasid_state->pasid);
+	unbind_pasid(pasid_state);
 }

 static struct mmu_notifier_ops iommu_mn = {
 	.release		= mn_release,
 	.clear_flush_young	= mn_clear_flush_young,
-	.change_pte		= mn_change_pte,
 	.invalidate_page	= mn_invalidate_page,
 	.invalidate_range_start	= mn_invalidate_range_start,
 	.invalidate_range_end	= mn_invalidate_range_end,
@@ -529,7 +520,7 @@ static void do_fault(struct work_struct *work)
 	write = !!(fault->flags & PPR_FAULT_WRITE);

 	down_read(&fault->state->mm->mmap_sem);
-	npages = get_user_pages(fault->state->task, fault->state->mm,
+	npages = get_user_pages(NULL, fault->state->mm,
 				fault->address, 1, write, 0, &page, NULL);
 	up_read(&fault->state->mm->mmap_sem);
@@ -587,7 +578,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
 		goto out;

 	pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
-	if (pasid_state == NULL) {
+	if (pasid_state == NULL || pasid_state->invalid) {
 		/* We know the device but not the PASID -> send INVALID */
 		amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
 				       PPR_INVALID, tag);
@@ -612,6 +603,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
 	fault->state     = pasid_state;
 	fault->tag       = tag;
 	fault->finish    = finish;
+	fault->pasid     = iommu_fault->pasid;
 	fault->flags     = iommu_fault->flags;
 	INIT_WORK(&fault->work, do_fault);
@@ -620,6 +612,10 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
 	ret = NOTIFY_OK;

 out_drop_state:
+
+	if (ret != NOTIFY_OK && pasid_state)
+		put_pasid_state(pasid_state);
+
 	put_device_state(dev_state);

 out:
@@ -635,6 +631,7 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
 {
 	struct pasid_state *pasid_state;
 	struct device_state *dev_state;
+	struct mm_struct *mm;
 	u16 devid;
 	int ret;
@@ -658,20 +655,23 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
 	if (pasid_state == NULL)
 		goto out;

 	atomic_set(&pasid_state->count, 1);
 	init_waitqueue_head(&pasid_state->wq);
+	spin_lock_init(&pasid_state->lock);

-	pasid_state->task = task;
-	pasid_state->mm = get_task_mm(task);
+	mm                        = get_task_mm(task);
+	pasid_state->mm           = mm;
 	pasid_state->device_state = dev_state;
 	pasid_state->pasid        = pasid;
+	pasid_state->invalid      = true; /* Mark as valid only if we are
+					     done with setting up the pasid */
 	pasid_state->mn.ops       = &iommu_mn;

 	if (pasid_state->mm == NULL)
 		goto out_free;

-	mmu_notifier_register(&pasid_state->mn, pasid_state->mm);
+	mmu_notifier_register(&pasid_state->mn, mm);

 	ret = set_pasid_state(dev_state, pasid_state, pasid);
 	if (ret)
@@ -682,15 +682,26 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
 	if (ret)
 		goto out_clear_state;

+	/* Now we are ready to handle faults */
+	pasid_state->invalid = false;
+
+	/*
+	 * Drop the reference to the mm_struct here. We rely on the
+	 * mmu_notifier release call-back to inform us when the mm
+	 * is going away.
+	 */
+	mmput(mm);
+
 	return 0;

 out_clear_state:
 	clear_pasid_state(dev_state, pasid);

 out_unregister:
-	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+	mmu_notifier_unregister(&pasid_state->mn, mm);

 out_free:
+	mmput(mm);
 	free_pasid_state(pasid_state);

 out:
@@ -728,10 +739,22 @@ void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
 	 */
 	put_pasid_state(pasid_state);

-	/* This will call the mn_release function and unbind the PASID */
+	/* Clear the pasid state so that the pasid can be re-used */
+	clear_pasid_state(dev_state, pasid_state->pasid);
+
+	/*
+	 * Call mmu_notifier_unregister to drop our reference
+	 * to pasid_state->mm
+	 */
 	mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);

+	put_pasid_state_wait(pasid_state); /* Reference taken in
+					      amd_iommu_bind_pasid */
 out:
+	/* Drop reference taken in this function */
 	put_device_state(dev_state);

+	/* Drop reference taken in amd_iommu_bind_pasid */
+	put_device_state(dev_state);
 }
 EXPORT_SYMBOL(amd_iommu_unbind_pasid);
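The reworked v2 driver above pins the mm only across setup and then relies on the notifier's release callback for teardown, instead of holding task_struct/mm_struct references for the lifetime of the binding. A minimal sketch of that lifetime pattern, independent of the driver specifics (my_state, my_release and my_bind are illustrative names, not part of this series):

	#include <linux/mmu_notifier.h>
	#include <linux/sched.h>

	struct my_state {
		struct mmu_notifier mn;
	};

	static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
	{
		/* mm is going away: tear down DMA/fault handling for it here */
	}

	static struct mmu_notifier_ops my_mn_ops = {
		.release = my_release,
	};

	static int my_bind(struct my_state *state, struct task_struct *task)
	{
		struct mm_struct *mm = get_task_mm(task);
		int ret;

		if (!mm)
			return -EINVAL;

		state->mn.ops = &my_mn_ops;
		ret = mmu_notifier_register(&state->mn, mm);

		/* Drop the mm reference; .release tells us when the mm dies */
		mmput(mm);
		return ret;
	}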
(file diff suppressed because it is too large)
@@ -38,6 +38,7 @@
 #include <linux/tboot.h>
 #include <linux/dmi.h>
 #include <linux/slab.h>
+#include <linux/iommu.h>
 #include <asm/irq_remapping.h>
 #include <asm/iommu_table.h>

@@ -980,6 +981,12 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
 	raw_spin_lock_init(&iommu->register_lock);

 	drhd->iommu = iommu;
+
+	if (intel_iommu_enabled)
+		iommu->iommu_dev = iommu_device_create(NULL, iommu,
+						       intel_iommu_groups,
+						       iommu->name);

 	return 0;

 err_unmap:
@@ -991,6 +998,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)

 static void free_iommu(struct intel_iommu *iommu)
 {
+	iommu_device_destroy(iommu->iommu_dev);
+
 	if (iommu->irq) {
 		free_irq(iommu->irq, iommu);
 		irq_set_handler_data(iommu->irq, NULL);
@@ -1339,9 +1348,6 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 		return -ENOMEM;
 	}

-	qi->free_head = qi->free_tail = 0;
-	qi->free_cnt = QI_LENGTH;
-
 	raw_spin_lock_init(&qi->q_lock);

 	__dmar_enable_qi(iommu);
@@ -1170,7 +1170,7 @@ static void exynos_iommu_remove_device(struct device *dev)
 	iommu_group_remove_device(dev);
 }

-static struct iommu_ops exynos_iommu_ops = {
+static const struct iommu_ops exynos_iommu_ops = {
 	.domain_init = exynos_iommu_domain_init,
 	.domain_destroy = exynos_iommu_domain_destroy,
 	.attach_dev = exynos_iommu_attach_device,
@@ -92,7 +92,7 @@ struct gen_pool *spaace_pool;
 * subwindow count per liodn.
 *
 */
-u32 pamu_get_max_subwin_cnt()
+u32 pamu_get_max_subwin_cnt(void)
 {
 	return max_subwindow_count;
 }
@@ -38,7 +38,6 @@
 #include <sysdev/fsl_pci.h>

 #include "fsl_pamu_domain.h"
-#include "pci.h"

 /*
 * Global spinlock that needs to be held while
@@ -887,8 +886,6 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
 	return ret;
 }

-#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
-
 static struct iommu_group *get_device_iommu_group(struct device *dev)
 {
 	struct iommu_group *group;
@@ -945,74 +942,13 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
 	struct pci_controller *pci_ctl;
 	bool pci_endpt_partioning;
 	struct iommu_group *group = NULL;
-	struct pci_dev *bridge, *dma_pdev = NULL;

 	pci_ctl = pci_bus_to_host(pdev->bus);
 	pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
 	/* We can partition PCIe devices so assign device group to the device */
 	if (pci_endpt_partioning) {
-		bridge = pci_find_upstream_pcie_bridge(pdev);
-		if (bridge) {
-			if (pci_is_pcie(bridge))
-				dma_pdev = pci_get_domain_bus_and_slot(
-						pci_domain_nr(pdev->bus),
-						bridge->subordinate->number, 0);
-			if (!dma_pdev)
-				dma_pdev = pci_dev_get(bridge);
-		} else
-			dma_pdev = pci_dev_get(pdev);
+		group = iommu_group_get_for_dev(&pdev->dev);

-		/* Account for quirked devices */
-		swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
-
-		/*
-		 * If it's a multifunction device that does not support our
-		 * required ACS flags, add to the same group as lowest numbered
-		 * function that also does not suport the required ACS flags.
-		 */
-		if (dma_pdev->multifunction &&
-		    !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) {
-			u8 i, slot = PCI_SLOT(dma_pdev->devfn);
-
-			for (i = 0; i < 8; i++) {
-				struct pci_dev *tmp;
-
-				tmp = pci_get_slot(dma_pdev->bus, PCI_DEVFN(slot, i));
-				if (!tmp)
-					continue;
-
-				if (!pci_acs_enabled(tmp, REQ_ACS_FLAGS)) {
-					swap_pci_ref(&dma_pdev, tmp);
-					break;
-				}
-				pci_dev_put(tmp);
-			}
-		}
-
-		/*
-		 * Devices on the root bus go through the iommu.  If that's not us,
-		 * find the next upstream device and test ACS up to the root bus.
-		 * Finding the next device may require skipping virtual buses.
-		 */
-		while (!pci_is_root_bus(dma_pdev->bus)) {
-			struct pci_bus *bus = dma_pdev->bus;
-
-			while (!bus->self) {
-				if (!pci_is_root_bus(bus))
-					bus = bus->parent;
-				else
-					goto root_bus;
-			}
-
-			if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
-				break;
-
-			swap_pci_ref(&dma_pdev, pci_dev_get(bus->self));
-		}
-
-root_bus:
-		group = get_device_iommu_group(&dma_pdev->dev);
-		pci_dev_put(dma_pdev);
 		/*
 		 * PCIe controller is not a paritionable entity
 		 * free the controller device iommu_group.
@@ -1116,8 +1052,7 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
 	ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
 				((w_count > 1) ? w_count : 0));
 	if (!ret) {
-		if (dma_domain->win_arr)
-			kfree(dma_domain->win_arr);
+		kfree(dma_domain->win_arr);
 		dma_domain->win_arr = kzalloc(sizeof(struct dma_window) *
 							  w_count, GFP_ATOMIC);
 		if (!dma_domain->win_arr) {
@@ -1138,7 +1073,7 @@ static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
 	return dma_domain->win_cnt;
 }

-static struct iommu_ops fsl_pamu_ops = {
+static const struct iommu_ops fsl_pamu_ops = {
 	.domain_init	= fsl_pamu_domain_init,
 	.domain_destroy = fsl_pamu_domain_destroy,
 	.attach_dev	= fsl_pamu_attach_device,
@@ -1155,7 +1090,7 @@ static const struct iommu_ops fsl_pamu_ops = {
 	.remove_device	= fsl_pamu_remove_device,
 };

-int pamu_domain_init()
+int pamu_domain_init(void)
 {
 	int ret = 0;
(file diff suppressed because it is too large)
@@ -70,6 +70,11 @@ static int get_irte(int irq, struct irte *entry)

 	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);

+	if (unlikely(!irq_iommu->iommu)) {
+		raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+		return -1;
+	}
+
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 	*entry = *(irq_iommu->iommu->ir_table->base + index);
@@ -369,29 +374,52 @@ static int set_hpet_sid(struct irte *irte, u8 id)
 	return 0;
 }

+struct set_msi_sid_data {
+	struct pci_dev *pdev;
+	u16 alias;
+};
+
+static int set_msi_sid_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+	struct set_msi_sid_data *data = opaque;
+
+	data->pdev = pdev;
+	data->alias = alias;
+
+	return 0;
+}
+
 static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 {
-	struct pci_dev *bridge;
+	struct set_msi_sid_data data;

 	if (!irte || !dev)
 		return -1;

-	/* PCIe device or Root Complex integrated PCI device */
-	if (pci_is_pcie(dev) || !dev->bus->parent) {
-		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
-			     (dev->bus->number << 8) | dev->devfn);
-		return 0;
-	}
+	pci_for_each_dma_alias(dev, set_msi_sid_cb, &data);

-	bridge = pci_find_upstream_pcie_bridge(dev);
-	if (bridge) {
-		if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
-			set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
-				(bridge->bus->number << 8) | dev->bus->number);
-		else /* this is a legacy PCI bridge */
-			set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
-				(bridge->bus->number << 8) | bridge->devfn);
-	}
+	/*
+	 * DMA alias provides us with a PCI device and alias.  The only case
+	 * where the it will return an alias on a different bus than the
+	 * device is the case of a PCIe-to-PCI bridge, where the alias is for
+	 * the subordinate bus.  In this case we can only verify the bus.
+	 *
+	 * If the alias device is on a different bus than our source device
+	 * then we have a topology based alias, use it.
+	 *
+	 * Otherwise, the alias is for a device DMA quirk and we cannot
+	 * assume that MSI uses the same requester ID.  Therefore use the
+	 * original device.
+	 */
+	if (PCI_BUS_NUM(data.alias) != data.pdev->bus->number)
+		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
+			     PCI_DEVID(PCI_BUS_NUM(data.alias),
+				       dev->bus->number));
+	else if (data.pdev->bus->number != dev->bus->number)
+		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
+	else
+		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
+			     PCI_DEVID(dev->bus->number, dev->devfn));

 	return 0;
 }
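Both set_msi_sid() above and get_alias() in the AMD driver drive pci_for_each_dma_alias() with a small callback; the iterator visits the device itself and every alias on the path upstream, and a nonzero return stops the walk. A minimal sketch of the callback contract (show_alias and the printout are illustrative only):

	#include <linux/pci.h>

	/* Called for the device and each DMA alias; nonzero stops the walk */
	static int show_alias(struct pci_dev *pdev, u16 alias, void *opaque)
	{
		dev_info(&pdev->dev, "DMA alias %02x:%02x.%d\n",
			 PCI_BUS_NUM(alias), PCI_SLOT(alias), PCI_FUNC(alias));
		return 0;	/* continue iterating */
	}

	static void dump_aliases(struct pci_dev *pdev)
	{
		pci_for_each_dma_alias(pdev, show_alias, NULL);
	}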
@@ -0,0 +1,134 @@
/*
 * IOMMU sysfs class support
 *
 * Copyright (C) 2014 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/slab.h>

/*
 * We provide a common class "devices" group which initially has no attributes.
 * As devices are added to the IOMMU, we'll add links to the group.
 */
static struct attribute *devices_attr[] = {
	NULL,
};

static const struct attribute_group iommu_devices_attr_group = {
	.name = "devices",
	.attrs = devices_attr,
};

static const struct attribute_group *iommu_dev_groups[] = {
	&iommu_devices_attr_group,
	NULL,
};

static void iommu_release_device(struct device *dev)
{
	kfree(dev);
}

static struct class iommu_class = {
	.name = "iommu",
	.dev_release = iommu_release_device,
	.dev_groups = iommu_dev_groups,
};

static int __init iommu_dev_init(void)
{
	return class_register(&iommu_class);
}
postcore_initcall(iommu_dev_init);

/*
 * Create an IOMMU device and return a pointer to it.  IOMMU specific
 * attributes can be provided as an attribute group, allowing a unique
 * namespace per IOMMU type.
 */
struct device *iommu_device_create(struct device *parent, void *drvdata,
				   const struct attribute_group **groups,
				   const char *fmt, ...)
{
	struct device *dev;
	va_list vargs;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	device_initialize(dev);

	dev->class = &iommu_class;
	dev->parent = parent;
	dev->groups = groups;
	dev_set_drvdata(dev, drvdata);

	va_start(vargs, fmt);
	ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
	va_end(vargs);
	if (ret)
		goto error;

	ret = device_add(dev);
	if (ret)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(ret);
}

void iommu_device_destroy(struct device *dev)
{
	if (!dev || IS_ERR(dev))
		return;

	device_unregister(dev);
}

/*
 * IOMMU drivers can indicate a device is managed by a given IOMMU using
 * this interface.  A link to the device will be created in the "devices"
 * directory of the IOMMU device in sysfs and an "iommu" link will be
 * created under the linked device, pointing back at the IOMMU device.
 */
int iommu_device_link(struct device *dev, struct device *link)
{
	int ret;

	if (!dev || IS_ERR(dev))
		return -ENODEV;

	ret = sysfs_add_link_to_group(&dev->kobj, "devices",
				      &link->kobj, dev_name(link));
	if (ret)
		return ret;

	ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu");
	if (ret)
		sysfs_remove_link_from_group(&dev->kobj, "devices",
					     dev_name(link));

	return ret;
}

void iommu_device_unlink(struct device *dev, struct device *link)
{
	if (!dev || IS_ERR(dev))
		return;

	sysfs_remove_link(&link->kobj, "iommu");
	sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link));
}
@@ -29,12 +29,17 @@
 #include <linux/idr.h>
 #include <linux/notifier.h>
 #include <linux/err.h>
+#include <linux/pci.h>
 #include <trace/events/iommu.h>

 static struct kset *iommu_group_kset;
 static struct ida iommu_group_ida;
 static struct mutex iommu_group_mutex;

+struct iommu_callback_data {
+	const struct iommu_ops *ops;
+};
+
 struct iommu_group {
 	struct kobject kobj;
 	struct kobject *devices_kobj;
@ -514,9 +519,191 @@ int iommu_group_id(struct iommu_group *group)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(iommu_group_id);
|
||||
|
||||
/*
|
||||
* To consider a PCI device isolated, we require ACS to support Source
|
||||
* Validation, Request Redirection, Completer Redirection, and Upstream
|
||||
* Forwarding. This effectively means that devices cannot spoof their
|
||||
* requester ID, requests and completions cannot be redirected, and all
|
||||
* transactions are forwarded upstream, even as it passes through a
|
||||
* bridge where the target device is downstream.
|
||||
*/
|
||||
#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
{
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	struct pci_dev *tmp;

	/*
	 * Find the upstream DMA alias for the device.  A device must not
	 * be aliased due to topology in order to have its own IOMMU group.
	 * If we find an alias along the way that already belongs to a
	 * group, use it.
	 */
	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	/*
	 * Continue upstream from the point of minimum IOMMU granularity
	 * due to aliases to the point where devices are protected from
	 * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
	 * group, use it.
	 */
	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	/*
	 * Next we need to consider DMA alias quirks.  If one device aliases
	 * to another, they should be grouped together.  It's theoretically
	 * possible that aliases could create chains of devices where each
	 * device aliases another device.  If we then factor in multifunction
	 * ACS grouping requirements, each alias could incorporate a new slot
	 * with multiple functions, each with aliases.  This is all extremely
	 * unlikely as DMA alias quirks are typically only used for PCIe
	 * devices where we usually have a single slot per bus.  Furthermore,
	 * the alias quirk is usually to another function within the slot
	 * (and ACS multifunction is not supported) or to a different slot
	 * that doesn't physically exist.  The likely scenario is therefore
	 * that everything on the bus gets grouped together.  To reduce the
	 * problem space, share the IOMMU group for all devices on the bus
	 * if a DMA alias quirk is present on the bus.
	 */
	tmp = NULL;
	for_each_pci_dev(tmp) {
		if (tmp->bus != pdev->bus ||
		    !(tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN))
			continue;

		pci_dev_put(tmp);
		tmp = NULL;

		/* We have an alias quirk, search for an existing group */
		for_each_pci_dev(tmp) {
			struct iommu_group *group_tmp;

			if (tmp->bus != pdev->bus)
				continue;

			group_tmp = iommu_group_get(&tmp->dev);
			if (!group) {
				group = group_tmp;
				continue;
			}

			if (group_tmp) {
				WARN_ON(group != group_tmp);
				iommu_group_put(group_tmp);
			}
		}

		return group ? group : iommu_group_alloc();
	}

	/*
	 * Non-multifunction devices or multifunction devices supporting
	 * ACS get their own group.
	 */
	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return iommu_group_alloc();

	/*
	 * Multifunction devices not supporting ACS share a group with other
	 * similar devices in the same slot.
	 */
	tmp = NULL;
	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = iommu_group_get(&tmp->dev);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	/* No shared group found, allocate new */
	return iommu_group_alloc();
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	struct iommu_group *group = ERR_PTR(-EIO);
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (dev_is_pci(dev))
		group = iommu_group_get_for_pci_dev(to_pci_dev(dev));

	if (IS_ERR(group))
		return group;

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}
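As a usage illustration (not part of this diff), an IOMMU driver's
->add_device() callback would typically call the helper like this;
my_iommu_add_device is a hypothetical name:

static int my_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	/* Find or create the group; dev is already a member on success. */
	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Drop the reference the helper handed us. */
	iommu_group_put(group);
	return 0;
}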

static int add_iommu_group(struct device *dev, void *data)
{
	struct iommu_ops *ops = data;
	struct iommu_callback_data *cb = data;
	const struct iommu_ops *ops = cb->ops;

	if (!ops->add_device)
		return -ENODEV;

@@ -532,7 +719,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct device *dev = data;
	struct iommu_ops *ops = dev->bus->iommu_ops;
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	unsigned long group_action = 0;

@@ -585,10 +772,14 @@ static struct notifier_block iommu_bus_nb = {
	.notifier_call = iommu_bus_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
static void iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	struct iommu_callback_data cb = {
		.ops = ops,
	};

	bus_register_notifier(bus, &iommu_bus_nb);
	bus_for_each_dev(bus, NULL, ops, add_iommu_group);
	bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
}

/**
@@ -604,7 +795,7 @@ static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	if (bus->iommu_ops != NULL)
		return -EBUSY;
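For context, a minimal registration sketch (driver and ops names are
hypothetical, reusing my_iommu_add_device from the sketch above); with the
constification done in this series the ops table can now live in read-only
data:

static const struct iommu_ops my_iommu_ops = {
	.add_device = my_iommu_add_device,
	/* ... remaining callbacks ... */
};

static int __init my_iommu_init(void)
{
	/* Returns -EBUSY if the bus already has iommu_ops set. */
	return bus_set_iommu(&pci_bus_type, &my_iommu_ops);
}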

@@ -1120,7 +1120,7 @@ static void ipmmu_remove_device(struct device *dev)
	dev->archdata.iommu = NULL;
}

static struct iommu_ops ipmmu_ops = {
static const struct iommu_ops ipmmu_ops = {
	.domain_init = ipmmu_domain_init,
	.domain_destroy = ipmmu_domain_destroy,
	.attach_dev = ipmmu_attach_device,

@@ -674,7 +674,7 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
	return 0;
}

static struct iommu_ops msm_iommu_ops = {
static const struct iommu_ops msm_iommu_ops = {
	.domain_init = msm_iommu_domain_init,
	.domain_destroy = msm_iommu_domain_destroy,
	.attach_dev = msm_iommu_attach_dev,

@@ -213,116 +213,6 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
	return bytes;
}

static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
			       size_t count, loff_t *ppos)
{
	struct device *dev = file->private_data;
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	char *p, *buf;
	struct iovm_struct *tmp;
	int uninitialized_var(i);
	ssize_t bytes;

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	p = buf;

	p += sprintf(p, "%-3s %-8s %-8s %6s %8s\n",
		     "No", "start", "end", "size", "flags");
	p += sprintf(p, "-------------------------------------------------\n");

	mutex_lock(&iommu_debug_lock);

	list_for_each_entry(tmp, &obj->mmap, list) {
		size_t len;
		const char *str = "%3d %08x-%08x %6x %8x\n";
		const int maxcol = 39;

		len = tmp->da_end - tmp->da_start;
		p += snprintf(p, maxcol, str,
			      i, tmp->da_start, tmp->da_end, len, tmp->flags);

		if (PAGE_SIZE - (p - buf) < maxcol)
			break;
		i++;
	}

	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);

	mutex_unlock(&iommu_debug_lock);
	free_page((unsigned long)buf);

	return bytes;
}

static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
			      size_t count, loff_t *ppos)
{
	struct device *dev = file->private_data;
	char *p, *buf;
	struct iovm_struct *area;
	ssize_t bytes;

	count = min_t(ssize_t, count, PAGE_SIZE);

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	p = buf;

	mutex_lock(&iommu_debug_lock);

	area = omap_find_iovm_area(dev, (u32)ppos);
	if (!area) {
		bytes = -EINVAL;
		goto err_out;
	}
	memcpy(p, area->va, count);
	p += count;

	bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
err_out:
	mutex_unlock(&iommu_debug_lock);
	free_page((unsigned long)buf);

	return bytes;
}

static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
			       size_t count, loff_t *ppos)
{
	struct device *dev = file->private_data;
	struct iovm_struct *area;
	char *p, *buf;

	count = min_t(size_t, count, PAGE_SIZE);

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	p = buf;

	mutex_lock(&iommu_debug_lock);

	if (copy_from_user(p, userbuf, count)) {
		count = -EFAULT;
		goto err_out;
	}

	area = omap_find_iovm_area(dev, (u32)ppos);
	if (!area) {
		count = -EINVAL;
		goto err_out;
	}
	memcpy(area->va, p, count);
err_out:
	mutex_unlock(&iommu_debug_lock);
	free_page((unsigned long)buf);

	return count;
}

#define DEBUG_FOPS(name) \
	static const struct file_operations debug_##name##_fops = { \
		.open = simple_open, \

@@ -342,8 +232,6 @@ DEBUG_FOPS_RO(ver);
DEBUG_FOPS_RO(regs);
DEBUG_FOPS_RO(tlb);
DEBUG_FOPS(pagetable);
DEBUG_FOPS_RO(mmap);
DEBUG_FOPS(mem);

#define __DEBUG_ADD_FILE(attr, mode) \
	{ \

@@ -389,8 +277,6 @@ static int iommu_debug_register(struct device *dev, void *data)
	DEBUG_ADD_FILE_RO(regs);
	DEBUG_ADD_FILE_RO(tlb);
	DEBUG_ADD_FILE(pagetable);
	DEBUG_ADD_FILE_RO(mmap);
	DEBUG_ADD_FILE(mem);

	return 0;

@@ -959,31 +959,18 @@ static int omap_iommu_probe(struct platform_device *pdev)
			return err;
		if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
			return -EINVAL;
		/*
		 * da_start and da_end are needed for omap-iovmm, so hardcode
		 * these values as used by OMAP3 ISP - the only user for
		 * omap-iovmm
		 */
		obj->da_start = 0;
		obj->da_end = 0xfffff000;
		if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
			obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
	} else {
		obj->nr_tlb_entries = pdata->nr_tlb_entries;
		obj->name = pdata->name;
		obj->da_start = pdata->da_start;
		obj->da_end = pdata->da_end;
	}
	if (obj->da_end <= obj->da_start)
		return -EINVAL;

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);

@@ -1291,7 +1278,7 @@ static void omap_iommu_remove_device(struct device *dev)
	kfree(arch_data);
}

static struct iommu_ops omap_iommu_ops = {
static const struct iommu_ops omap_iommu_ops = {
	.domain_init = omap_iommu_domain_init,
	.domain_destroy = omap_iommu_domain_destroy,
	.attach_dev = omap_iommu_attach_dev,

@@ -46,12 +46,7 @@ struct omap_iommu {

	int nr_tlb_entries;

	struct list_head mmap;
	struct mutex mmap_lock;	/* protect mmap */

	void *ctx;		/* iommu context: registers saved area */
	u32 da_start;
	u32 da_end;

	int has_bus_err_back;
};

@@ -154,9 +149,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_RAM_PADDR_MASK \
	((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)

#define MMU_RAM_ENDIAN_SHIFT	9
#define MMU_RAM_ENDIAN_MASK	(1 << MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ENDIAN_LITTLE	(0 << MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ENDIAN_BIG	(1 << MMU_RAM_ENDIAN_SHIFT)

#define MMU_RAM_ELSZ_SHIFT	7
#define MMU_RAM_ELSZ_MASK	(3 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_8		(0 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_16		(1 << MMU_RAM_ELSZ_SHIFT)

@@ -1,791 +0,0 @@
/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

/*
 * IOVMF_FLAGS: attributes for an iommu virtual memory area (iovma)
 *
 * The lower 16 bits are used for h/w and the upper 16 bits for s/w.
 */
#define IOVMF_SW_SHIFT		16

/*
 * iovma: h/w flags derived from cam and ram attributes
 */
#define IOVMF_CAM_MASK		(~((1 << 10) - 1))
#define IOVMF_RAM_MASK		(~IOVMF_CAM_MASK)

#define IOVMF_PGSZ_MASK		(3 << 0)
#define IOVMF_PGSZ_1M		MMU_CAM_PGSZ_1M
#define IOVMF_PGSZ_64K		MMU_CAM_PGSZ_64K
#define IOVMF_PGSZ_4K		MMU_CAM_PGSZ_4K
#define IOVMF_PGSZ_16M		MMU_CAM_PGSZ_16M

#define IOVMF_ENDIAN_MASK	(1 << 9)
#define IOVMF_ENDIAN_BIG	MMU_RAM_ENDIAN_BIG

#define IOVMF_ELSZ_MASK		(3 << 7)
#define IOVMF_ELSZ_16		MMU_RAM_ELSZ_16
#define IOVMF_ELSZ_32		MMU_RAM_ELSZ_32
#define IOVMF_ELSZ_NONE		MMU_RAM_ELSZ_NONE

#define IOVMF_MIXED_MASK	(1 << 6)
#define IOVMF_MIXED		MMU_RAM_MIXED

/*
 * iovma: s/w flags, used internally for mapping and unmapping.
 */
#define IOVMF_MMIO		(1 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC		(2 << IOVMF_SW_SHIFT)
#define IOVMF_ALLOC_MASK	(3 << IOVMF_SW_SHIFT)

/* "superpages" are supported only with physically linear pages */
#define IOVMF_DISCONT		(1 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR		(2 << (2 + IOVMF_SW_SHIFT))
#define IOVMF_LINEAR_MASK	(3 << (2 + IOVMF_SW_SHIFT))

#define IOVMF_DA_FIXED		(1 << (4 + IOVMF_SW_SHIFT))

static struct kmem_cache *iovm_area_cachep;

/* return the offset of the first scatterlist entry in a sg table */
static unsigned int sgtable_offset(const struct sg_table *sgt)
{
	if (!sgt || !sgt->nents)
		return 0;

	return sgt->sgl->offset;
}

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%u %u)\n",
			       __func__, i, bytes, sg->offset);
			return 0;
		}

		if (i && sg->offset) {
			pr_err("%s: sg[%d] offset not allowed in internal entries\n",
			       __func__, i);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;
		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}
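A worked example may help (assumed inputs): sgtable_nents(SZ_16M + SZ_1M, 0, 0)
picks a 16 MiB superpage on the first iteration (da | pa is 16 MiB aligned and
iopgsz_max() caps the entry at SZ_16M), then a 1 MiB entry for the remainder
(the addresses are now only 16 MiB aligned but only 1 MiB is left), so it
returns 2 entries instead of the 4352 that a naive 4 KiB per-page table would
need.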

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
			 (unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
					    const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * omap_find_iovm_area - find the iovma which includes @da
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Find the existing iovma starting at @da
 */
struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(omap_find_iovm_area);

/*
 * This finds the hole (area) which fits the requested address and len
 * in the iovmas mmap, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct omap_iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
		   obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}

static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * omap_da_to_va - convert (d) to (v)
 * @dev: client device
 * @da: iommu device virtual address
 * @va: mpu virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given device virtual addr
 */
void *omap_da_to_va(struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(omap_da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;
	void *va_end;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'omap_iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}

	va_end = _va + PAGE_SIZE * i;
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * Actually this is not necessary at all, it just exists for
	 * consistency and code readability.
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!domain || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		size_t bytes;

		pa = sg_phys(sg) - sg->offset;
		bytes = sg->length + sg->offset;

		flags &= ~IOVMF_PGSZ_MASK;

		if (bytes_to_iopgsz(bytes) < 0)
			goto err_out;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		err = iommu_map(domain, da, pa, bytes, flags);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		/* ignore failures.. we're already handling one */
		iommu_unmap(domain, da, bytes);

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
			    struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;
	const struct sg_table *sgt = area->sgt;
	struct scatterlist *sg;
	int i;
	size_t unmapped;

	BUG_ON(!sgtable_ok(sgt));
	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length + sg->offset;

		unmapped = iommu_unmap(domain, start, bytes);
		if (unmapped < bytes)
			break;

		dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
			__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu_domain *domain,
				      struct omap_iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(domain, obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}

static u32 map_iommu_region(struct iommu_domain *domain, struct omap_iommu *obj,
			    u32 da, const struct sg_table *sgt, void *va,
			    size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(domain, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32
__iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
	     u32 da, const struct sg_table *sgt,
	     void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(domain, obj, da, sgt, va, bytes, flags);
}

/**
 * omap_iommu_vmap - (d)-(p)-(v) address mapper
 * @domain: iommu domain
 * @dev: client device
 * @sgt: address of scatter gather table
 * @flags: iovma and page property
 *
 * Creates a 1-n-1 mapping with the given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
		    const struct sg_table *sgt, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da + sgtable_offset(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vmap);
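To illustrate the interface this series removes, a hedged sketch of how a
client such as the OMAP3 ISP driver could have mapped and torn down a
pre-built scatterlist (domain, device and sg_table setup omitted):

	/* Map an MMIO scatterlist; da = 0 lets the allocator pick one. */
	u32 da = omap_iommu_vmap(domain, dev, 0, sgt, IOVMF_MMIO);
	if (IS_ERR_VALUE(da))
		return (int)da;

	/* ... device works with da ... */

	/* Tear down; the original sg_table is handed back for freeing. */
	sgt = omap_iommu_vunmap(domain, dev, da);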

/**
 * omap_iommu_vunmap - release the virtual mapping obtained by 'omap_iommu_vmap()'
 * @domain: iommu domain
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'omap_iommu_vmap()'.
 */
struct sg_table *
omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'omap_iommu_vmap()' is called.
	 * Just return 'sgt' to the caller to free.
	 */
	da &= PAGE_MASK;
	sgt = unmap_vm_area(domain, obj, da, vunmap_sg,
			    IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(omap_iommu_vunmap);

/**
 * omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
 * @dev: client device
 * @da: contiguous iommu virtual memory
 * @bytes: allocation size
 * @flags: iovma and page property
 *
 * Allocate @bytes linearly, create a 1-n-1 mapping and return
 * @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
		   size_t bytes, u32 flags)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(domain, obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);

/**
 * omap_iommu_vfree - release the memory allocated by 'omap_iommu_vmalloc()'
 * @dev: client device
 * @da: iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'omap_iommu_vmalloc()'.
 */
void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
		      const u32 da)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	struct sg_table *sgt;

	sgt = unmap_vm_area(domain, obj, da, vfree,
			    IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(omap_iommu_vfree);

static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");

@@ -1,29 +0,0 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 * Copyright (C) 2013 Freescale Semiconductor, Inc.
 *
 */
#ifndef __IOMMU_PCI_H
#define __IOMMU_PCI_H

/* Helper function for swapping pci device reference */
static inline void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
{
	pci_dev_put(*from);
	*from = to;
}
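A usage sketch (the surrounding walk is hypothetical): stepping upstream to
the root bus while never holding more than one device reference at a time:

	struct pci_dev *dev = pci_dev_get(start);

	while (!pci_is_root_bus(dev->bus))
		swap_pci_ref(&dev, pci_dev_get(dev->bus->self));
	pci_dev_put(dev);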

#endif /* __IOMMU_PCI_H */

@@ -354,7 +354,7 @@ static int shmobile_iommu_add_device(struct device *dev)
	return 0;
}

static struct iommu_ops shmobile_iommu_ops = {
static const struct iommu_ops shmobile_iommu_ops = {
	.domain_init = shmobile_iommu_domain_init,
	.domain_destroy = shmobile_iommu_domain_destroy,
	.attach_dev = shmobile_iommu_attach_device,

@@ -309,7 +309,7 @@ static int gart_iommu_domain_has_cap(struct iommu_domain *domain,
	return 0;
}

static struct iommu_ops gart_iommu_ops = {
static const struct iommu_ops gart_iommu_ops = {
	.domain_init = gart_iommu_domain_init,
	.domain_destroy = gart_iommu_domain_destroy,
	.attach_dev = gart_iommu_attach_dev,

@@ -947,7 +947,7 @@ static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
	dev_dbg(smmu->dev, "smmu_as@%p\n", as);
}

static struct iommu_ops smmu_iommu_ops = {
static const struct iommu_ops smmu_iommu_ops = {
	.domain_init = smmu_iommu_domain_init,
	.domain_destroy = smmu_iommu_domain_destroy,
	.attach_dev = smmu_iommu_attach_dev,

@@ -119,6 +119,13 @@ typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
					amd_iommu_invalid_ppr_cb cb);

#define PPR_FAULT_EXEC	(1 << 1)
#define PPR_FAULT_READ	(1 << 2)
#define PPR_FAULT_WRITE	(1 << 5)
#define PPR_FAULT_USER	(1 << 6)
#define PPR_FAULT_RSVD	(1 << 7)
#define PPR_FAULT_GN	(1 << 8)

/**
 * amd_iommu_device_info() - Get information about IOMMUv2 support of a
 *			     PCI device

@@ -124,7 +124,7 @@ struct bus_type {

	const struct dev_pm_ops *pm;

	struct iommu_ops *iommu_ops;
	const struct iommu_ops *iommu_ops;

	struct subsys_private *p;
	struct lock_class_key lock_key;

@@ -114,22 +114,30 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
/* Intel IOMMU detection */
extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void);
#else
struct dmar_pci_notify_info;
static inline int detect_intel_iommu(void)
{
	return -ENODEV;
}

static inline int dmar_table_init(void)
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	return -ENODEV;
	return 0;
}
static inline int enable_drhd_fault_handling(void)
static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
{
	return -1;
	return 0;
}
#endif /* !CONFIG_DMAR_TABLE */
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	return 0;
}
#endif /* CONFIG_INTEL_IOMMU */

#endif /* CONFIG_DMAR_TABLE */

struct irte {
	union {

@@ -177,26 +185,4 @@ extern int dmar_set_interrupt(struct intel_iommu *iommu);
extern irqreturn_t dmar_fault(int irq, void *dev_id);
extern int arch_setup_dmar_msi(unsigned int irq);

#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
extern int intel_iommu_init(void);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
{
	return 0;
}
static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
{
	return 0;
}
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{
	return 0;
}
#endif /* CONFIG_INTEL_IOMMU */

#endif /* __DMAR_H__ */

@@ -336,6 +336,7 @@ struct intel_iommu {
#ifdef CONFIG_IRQ_REMAP
	struct ir_table *ir_table;	/* Interrupt remapping info */
#endif
	struct device *iommu_dev;	/* IOMMU-sysfs device */
	int node;
};
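The new iommu_dev member backs the sysfs interface added in this series; as a
hedged sketch of how the driver could populate it during initialization
(argument details may differ from the actual call site):

	/* Expose this IOMMU under the iommu class as "dmar<N>". */
	iommu->iommu_dev = iommu_device_create(NULL, iommu,
					       intel_iommu_groups,
					       "dmar%d", iommu->seq_id);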

@@ -365,4 +366,6 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

extern int dmar_ir_support(void);

extern const struct attribute_group *intel_iommu_groups[];

#endif

@@ -50,7 +50,7 @@ struct iommu_domain_geometry {
};

struct iommu_domain {
	struct iommu_ops *ops;
	const struct iommu_ops *ops;
	void *priv;
	iommu_fault_handler_t handler;
	void *handler_token;

@@ -140,7 +140,7 @@ struct iommu_ops {
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);

@@ -181,11 +181,18 @@ extern int iommu_group_register_notifier(struct iommu_group *group,
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
struct device *iommu_device_create(struct device *parent, void *drvdata,
				   const struct attribute_group **groups,
				   const char *fmt, ...);
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);

/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,

@@ -396,6 +403,27 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
	return -EINVAL;
}

static inline struct device *iommu_device_create(struct device *parent,
						 void *drvdata,
						 const struct attribute_group **groups,
						 const char *fmt, ...)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_device_destroy(struct device *dev)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

#endif /* CONFIG_IOMMU_API */

#endif /* __LINUX_IOMMU_H */

@@ -34,6 +34,11 @@ struct iova_domain {
	unsigned long dma_32bit_pfn;
};

static inline unsigned long iova_size(struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}
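Note the helper counts page frames, not bytes: for example, an iova with
pfn_lo = 0x100 and pfn_hi = 0x1ff has iova_size() == 0x100 pages, i.e. 1 MiB
with 4 KiB pages.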

struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);

@@ -10,41 +10,8 @@
 * published by the Free Software Foundation.
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

struct iovm_struct {
	struct omap_iommu *iommu;	/* iommu object which this belongs to */
	u32 da_start;			/* area definition */
	u32 da_end;
	u32 flags;			/* IOVMF_: see below */
	struct list_head list;		/* linked in ascending order */
	const struct sg_table *sgt;	/* keep 'page' <-> 'da' mapping */
	void *va;			/* mpu side mapped address */
};

#define MMU_RAM_ENDIAN_SHIFT	9
#define MMU_RAM_ENDIAN_LITTLE	(0 << MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ELSZ_8		(0 << MMU_RAM_ELSZ_SHIFT)
#define IOVMF_ENDIAN_LITTLE	MMU_RAM_ENDIAN_LITTLE
#define MMU_RAM_ELSZ_SHIFT	7
#define IOVMF_ELSZ_8		MMU_RAM_ELSZ_8

struct iommu_domain;

extern struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da);
extern u32
omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
		const struct sg_table *sgt, u32 flags);
extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain,
					  struct device *dev, u32 da);
extern u32
omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev,
		   u32 da, size_t bytes, u32 flags);
extern void
omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
		 const u32 da);
extern void *omap_da_to_va(struct device *dev, u32 da);
#ifndef _OMAP_IOMMU_H_
#define _OMAP_IOMMU_H_

extern void omap_iommu_save_ctx(struct device *dev);
extern void omap_iommu_restore_ctx(struct device *dev);

@@ -31,14 +31,10 @@ struct omap_iommu_arch_data {

/**
 * struct omap_mmu_dev_attr - OMAP mmu device attributes for omap_hwmod
 * @da_start: device address where the va space starts.
 * @da_end: device address where the va space ends.
 * @nr_tlb_entries: number of entries supported by the translation
 *		    look-aside buffer (TLB).
 */
struct omap_mmu_dev_attr {
	u32 da_start;
	u32 da_end;
	int nr_tlb_entries;
};

@@ -46,8 +42,6 @@ struct iommu_platform_data {
	const char *name;
	const char *reset_name;
	int nr_tlb_entries;
	u32 da_start;
	u32 da_end;

	int (*assert_reset)(struct platform_device *pdev, const char *name);
	int (*deassert_reset)(struct platform_device *pdev, const char *name);