IOMMU Updates for Linux v3.20

This time with:
 
	* Generic page-table framework for ARM IOMMUs using the LPAE page-table
	  format; the ARM-SMMU and Renesas IPMMU drivers already make use of it.
 
	* Break out the IO virtual address allocator from the Intel IOMMU so
	  that it can be used by other DMA-API implementations too. The first
	  user will be the ARM64 common DMA-API implementation for IOMMUs.
 
 	* Device tree support for Renesas IPMMU
 
 	* Various fixes and cleanups all over the place
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.22 (GNU/Linux)
 
 iQIcBAABAgAGBQJU3MJOAAoJECvwRC2XARrjopUP+wachFx8vb00M4hlnlwL6FCn
 DyIFkA1n4wL0muPhjcBI+LViEXrSxjr2TYoJEaBg+fiByWWQ1Hefg+KPz331Lo1D
 +uo7WiOa1AB3pfkQiUN9IN6xx+o6ivhb3UQPiL4FHjggB/qz+KVxMM9nx0j8o0fQ
 D9q6HLFiOIsFkra3xZaSuDGvYUBpcwyfn8FP1HVfvLlg1uxIGDcUJX3qU5UBpj9q
 al/lPZ4A7rp+JLApV6WyouPiyVOZKikb5x920KeRNBem7a9fNBdgf+x7QbKpNXa1
 5MaT5MarwGe8lJE4wtjOqRtsllhia+A1rg/6JbROPrlGetRFiuIh2sCKLvwOCko/
 IjBHSutpaRT1lFoAG0TAnXQlvHRG/58XxOlP3eF613X/p8/cezuUaTyTIwZam9X3
 j2GWwbUcBiHTxlu7bQDPz6a7cTf4w6wEALzYl18QrAFv+2LqlCfOo/LSlpStmjrF
 kRN8DYaohlTULvmFneSr8rfGsnp5yPgIPvdmqiSwTz/Ih7kYPgfLy6+v6IAHUqZj
 0n9oGs8eMqVvSzM2qqmyA9WGuQZRyhNjj4iDwn/he5YMw2kqxUQYGMpLnSu0Oi48
 n4PqodtVol64jKLwaHZwyU8u71iyjUC5K9TDot/I2wlSRcTELJhxGh6c1sfDLyrO
 u/htIszgKCgFvVrQoEZB
 =dwrA
 -----END PGP SIGNATURE-----

Merge tag 'iommu-updates-v3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "This time with:

   - Generic page-table framework for ARM IOMMUs using the LPAE
     page-table format; the ARM-SMMU and Renesas IPMMU drivers already
     make use of it.

   - Break out the IO virtual address allocator from the Intel IOMMU so
     that it can be used by other DMA-API implementations too.  The
     first user will be the ARM64 common DMA-API implementation for
     IOMMUs

   - Device tree support for Renesas IPMMU

   - Various fixes and cleanups all over the place"
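
As background for the first item above, here is a rough sketch of how a driver is expected to use the new generic page-table code: it fills in a struct io_pgtable_cfg and asks for a particular page-table format. This is only an illustration, not code from this series; the iommu_gather_ops callbacks, the cookie and the example_* name are driver-specific placeholders.

/* Sketch: allocating ARM LPAE page tables through the io-pgtable layer. */
#include <linux/iommu.h>
#include <linux/sizes.h>
#include "io-pgtable.h"		/* private header under drivers/iommu/ */

static struct io_pgtable_ops *
example_alloc_pgtable(const struct iommu_gather_ops *tlb_ops, void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,	/* 4K granule sizes */
		.ias		= 32,		/* input (IOVA) address bits */
		.oas		= 40,		/* output (physical) address bits */
		.tlb		= tlb_ops,	/* driver's TLB maintenance callbacks */
	};

	/* ARM_64_LPAE_S1 is one of the formats added by this series. */
	return alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
}

All further mapping work then goes through the returned ops table (ops->map(), ops->unmap(), ops->iova_to_phys()) and ends with free_io_pgtable_ops(), which is essentially what the arm-smmu and ipmmu-vmsa conversions in this pull do.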

* tag 'iommu-updates-v3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (36 commits)
  iommu/amd: Convert non-returned local variable to boolean when relevant
  iommu: Update my email address
  iommu/amd: Use wait_event in put_pasid_state_wait
  iommu/amd: Fix amd_iommu_free_device()
  iommu/arm-smmu: Avoid build warning
  iommu/fsl: Various cleanups
  iommu/fsl: Use %pa to print phys_addr_t
  iommu/omap: Print phys_addr_t using %pa
  iommu: Make more drivers depend on COMPILE_TEST
  iommu/ipmmu-vmsa: Fix IOMMU lookup when multiple IOMMUs are registered
  iommu: Disable on !MMU builds
  iommu/fsl: Remove unused fsl_of_pamu_ids[]
  iommu/fsl: Fix section mismatch
  iommu/ipmmu-vmsa: Use the ARM LPAE page table allocator
  iommu: Fix trace_map() to report original iova and original size
  iommu/arm-smmu: add support for iova_to_phys through ATS1PR
  iopoll: Introduce memory-mapped IO polling macros
  iommu/arm-smmu: don't touch the secure STLBIALL register
  iommu/arm-smmu: make use of generic LPAE allocator
  iommu: io-pgtable-arm: add non-secure quirk
  ...
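
The IOVA allocator break-out from the second item shows up in the intel-iommu.c hunks further down: iova.o now builds under its own CONFIG_IOMMU_IOVA symbol and init_iova_domain() gains explicit page-granule and start-PFN arguments. The fragment below is a hedged sketch of how a non-VT-d DMA-API backend might drive it; the example_* helpers, the PFN limits and the 0 failure value are illustrative assumptions, not part of this series.

#include <linux/dma-mapping.h>
#include <linux/iova.h>
#include <linux/mm.h>

/* Sketch: PAGE_SIZE granule, IOVAs starting at PFN 1 and capped below 4GiB. */
static void example_iova_init(struct iova_domain *iovad)
{
	init_iova_domain(iovad, PAGE_SIZE, 1, DMA_BIT_MASK(32) >> PAGE_SHIFT);
}

static dma_addr_t example_iova_alloc(struct iova_domain *iovad, size_t size)
{
	struct iova *iova;

	iova = alloc_iova(iovad, PAGE_ALIGN(size) >> PAGE_SHIFT,
			  DMA_BIT_MASK(32) >> PAGE_SHIFT, true /* size-aligned */);
	if (!iova)
		return 0;	/* 0 stands in for an allocation failure here */

	return (dma_addr_t)iova->pfn_lo << PAGE_SHIFT;
}

Frees go back through __free_iova()/free_iova(), and put_iova_domain() tears the whole tree down when the domain goes away.
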
Merge commit a26be149fa by Linus Torvalds, 2015-02-12 09:16:56 -08:00
28 changed files with 2251 additions and 1504 deletions


@ -0,0 +1,41 @@
* Renesas VMSA-Compatible IOMMU
The IPMMU is an IOMMU implementation compatible with the ARM VMSA page tables.
It provides address translation for bus masters outside of the CPU, each
connected to the IPMMU through a port called micro-TLB.
Required Properties:
- compatible: Must contain "renesas,ipmmu-vmsa".
- reg: Base address and size of the IPMMU registers.
- interrupts: Specifiers for the MMU fault interrupts. For instances that
support secure mode two interrupts must be specified, for non-secure and
secure mode, in that order. For instances that don't support secure mode a
single interrupt must be specified.
- #iommu-cells: Must be 1.
Each bus master connected to an IPMMU must reference the IPMMU in its device
node with the following property:
- iommus: A reference to the IPMMU in two cells. The first cell is a phandle
to the IPMMU and the second cell the number of the micro-TLB that the
device is connected to.
Example: R8A7791 IPMMU-MX and VSP1-D0 bus master
ipmmu_mx: mmu@fe951000 {
compatible = "renasas,ipmmu-vmsa";
reg = <0 0xfe951000 0 0x1000>;
interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
<0 221 IRQ_TYPE_LEVEL_HIGH>;
#iommu-cells = <1>;
};
vsp1@fe928000 {
...
iommus = <&ipmmu_mx 13>;
...
};


@ -1606,6 +1606,7 @@ M: Will Deacon <will.deacon@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: drivers/iommu/arm-smmu.c
F: drivers/iommu/io-pgtable-arm.c
ARM64 PORT (AARCH64 ARCHITECTURE)
M: Catalin Marinas <catalin.marinas@arm.com>


@ -350,7 +350,6 @@ config ARM64_VA_BITS_42
config ARM64_VA_BITS_48
bool "48-bit"
depends on !ARM_SMMU
endchoice


@ -32,8 +32,8 @@ enum pamu_stash_target {
*/
struct pamu_stash_attribute {
u32 cpu; /* cpu number */
u32 cache; /* cache to stash to: L1,L2,L3 */
u32 cpu; /* cpu number */
u32 cache; /* cache to stash to: L1,L2,L3 */
};
#endif /* __FSL_PAMU_STASH_H */


@ -4,6 +4,7 @@ config IOMMU_API
menuconfig IOMMU_SUPPORT
bool "IOMMU Hardware Support"
depends on MMU
default y
---help---
Say Y here if you want to compile device drivers for IO Memory
@ -13,13 +14,43 @@ menuconfig IOMMU_SUPPORT
if IOMMU_SUPPORT
menu "Generic IOMMU Pagetable Support"
# Selected by the actual pagetable implementations
config IOMMU_IO_PGTABLE
bool
config IOMMU_IO_PGTABLE_LPAE
bool "ARMv7/v8 Long Descriptor Format"
select IOMMU_IO_PGTABLE
help
Enable support for the ARM long descriptor pagetable format.
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
sizes at both stage-1 and stage-2, as well as address spaces
up to 48-bits in size.
config IOMMU_IO_PGTABLE_LPAE_SELFTEST
bool "LPAE selftests"
depends on IOMMU_IO_PGTABLE_LPAE
help
Enable self-tests for LPAE page table allocator. This performs
a series of page-table consistency checks during boot.
If unsure, say N here.
endmenu
config IOMMU_IOVA
bool
config OF_IOMMU
def_bool y
depends on OF && IOMMU_API
config FSL_PAMU
bool "Freescale IOMMU support"
depends on PPC_E500MC
depends on PPC32
depends on PPC_E500MC || COMPILE_TEST
select IOMMU_API
select GENERIC_ALLOCATOR
help
@ -30,7 +61,8 @@ config FSL_PAMU
# MSM IOMMU support
config MSM_IOMMU
bool "MSM IOMMU Support"
depends on ARCH_MSM8X60 || ARCH_MSM8960
depends on ARM
depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
select IOMMU_API
help
Support for the IOMMUs found on certain Qualcomm SOCs.
@ -91,6 +123,7 @@ config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
select IOMMU_API
select IOMMU_IOVA
select DMAR_TABLE
help
DMA remapping (DMAR) devices support enables independent address
@ -140,7 +173,8 @@ config IRQ_REMAP
# OMAP IOMMU support
config OMAP_IOMMU
bool "OMAP IOMMU Support"
depends on ARCH_OMAP2PLUS
depends on ARM && MMU
depends on ARCH_OMAP2PLUS || COMPILE_TEST
select IOMMU_API
config OMAP_IOMMU_DEBUG
@ -187,7 +221,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
depends on ARCH_EXYNOS && ARM
depends on ARCH_EXYNOS && ARM && MMU
select IOMMU_API
select ARM_DMA_USE_IOMMU
help
@ -216,7 +250,7 @@ config SHMOBILE_IPMMU_TLB
config SHMOBILE_IOMMU
bool "IOMMU for Renesas IPMMU/IPMMUI"
default n
depends on ARM
depends on ARM && MMU
depends on ARCH_SHMOBILE || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
@ -287,6 +321,7 @@ config IPMMU_VMSA
depends on ARM_LPAE
depends on ARCH_SHMOBILE || COMPILE_TEST
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
help
Support for the Renesas VMSA-compatible IPMMU found in the
@ -304,13 +339,13 @@ config SPAPR_TCE_IOMMU
config ARM_SMMU
bool "ARM Ltd. System MMU (SMMU) Support"
depends on ARM64 || (ARM_LPAE && OF)
depends on (ARM64 || ARM) && MMU
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
help
Support for implementations of the ARM System MMU architecture
versions 1 and 2. The driver supports both v7l and v8l table
formats with 4k and 64k page sizes.
versions 1 and 2.
Say Y here if your SoC includes an IOMMU device implementing
the ARM SMMU architecture.


@ -1,13 +1,16 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o


@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Author: Joerg Roedel <jroedel@suse.de>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it
@ -843,10 +843,10 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
size_t size, u16 domid, int pde)
{
u64 pages;
int s;
bool s;
pages = iommu_num_pages(address, size, PAGE_SIZE);
s = 0;
s = false;
if (pages > 1) {
/*
@ -854,7 +854,7 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
* TLB entries for this domain
*/
address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
s = 1;
s = true;
}
address &= PAGE_MASK;
@ -874,10 +874,10 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
u64 address, size_t size)
{
u64 pages;
int s;
bool s;
pages = iommu_num_pages(address, size, PAGE_SIZE);
s = 0;
s = false;
if (pages > 1) {
/*
@ -885,7 +885,7 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
* TLB entries for this domain
*/
address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
s = 1;
s = true;
}
address &= PAGE_MASK;


@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Author: Joerg Roedel <jroedel@suse.de>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it


@ -1,6 +1,6 @@
/*
* Copyright (C) 2009-2010 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published


@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Author: Joerg Roedel <jroedel@suse.de>
* Leo Duran <leo.duran@amd.com>
*
* This program is free software; you can redistribute it and/or modify it


@ -1,6 +1,6 @@
/*
* Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@ -31,7 +31,7 @@
#include "amd_iommu_proto.h"
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
#define MAX_DEVICES 0x10000
#define PRI_QUEUE_SIZE 512
@ -151,18 +151,6 @@ static void put_device_state(struct device_state *dev_state)
wake_up(&dev_state->wq);
}
static void put_device_state_wait(struct device_state *dev_state)
{
DEFINE_WAIT(wait);
prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
if (!atomic_dec_and_test(&dev_state->count))
schedule();
finish_wait(&dev_state->wq, &wait);
free_device_state(dev_state);
}
/* Must be called under dev_state->lock */
static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
int pasid, bool alloc)
@ -278,14 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state_wait(struct pasid_state *pasid_state)
{
DEFINE_WAIT(wait);
prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
if (!atomic_dec_and_test(&pasid_state->count))
schedule();
finish_wait(&pasid_state->wq, &wait);
wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
free_pasid_state(pasid_state);
}
@ -851,7 +832,13 @@ void amd_iommu_free_device(struct pci_dev *pdev)
/* Get rid of any remaining pasid states */
free_pasid_states(dev_state);
put_device_state_wait(dev_state);
put_device_state(dev_state);
/*
* Wait until the last reference is dropped before freeing
* the device state.
*/
wait_event(dev_state->wq, !atomic_read(&dev_state->count));
free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
@ -921,7 +908,7 @@ static int __init amd_iommu_v2_init(void)
{
int ret;
pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
pr_info("AMD IOMMUv2 driver by Joerg Roedel <jroedel@suse.de>\n");
if (!amd_iommu_v2_supported()) {
pr_info("AMD IOMMUv2 functionality not available on this system\n");

File diff suppressed because it is too large.


@ -18,23 +18,14 @@
#define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__
#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/of_platform.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/fsl_guts.h>
#include "fsl_pamu.h"
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <asm/mpc85xx.h>
#include <asm/fsl_guts.h>
/* define indexes for each operation mapping scenario */
#define OMI_QMAN 0x00
#define OMI_FMAN 0x01
@ -44,13 +35,13 @@
#define make64(high, low) (((u64)(high) << 32) | (low))
struct pamu_isr_data {
void __iomem *pamu_reg_base; /* Base address of PAMU regs*/
void __iomem *pamu_reg_base; /* Base address of PAMU regs */
unsigned int count; /* The number of PAMUs */
};
static struct paace *ppaact;
static struct paace *spaact;
static struct ome *omt;
static struct ome *omt __initdata;
/*
* Table for matching compatible strings, for device tree
@ -58,14 +49,13 @@ static struct ome *omt;
* "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
* SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
* string would be used.
*/
static const struct of_device_id guts_device_ids[] = {
*/
static const struct of_device_id guts_device_ids[] __initconst = {
{ .compatible = "fsl,qoriq-device-config-1.0", },
{ .compatible = "fsl,qoriq-device-config-2.0", },
{}
};
/*
* Table for matching compatible strings, for device tree
* L3 cache controller node.
@ -73,7 +63,7 @@ static const struct of_device_id guts_device_ids[] = {
* "fsl,b4860-l3-cache-controller" corresponds to B4 &
* "fsl,p4080-l3-cache-controller" corresponds to other,
* SOCs.
*/
*/
static const struct of_device_id l3_device_ids[] = {
{ .compatible = "fsl,t4240-l3-cache-controller", },
{ .compatible = "fsl,b4860-l3-cache-controller", },
@ -85,7 +75,7 @@ static const struct of_device_id l3_device_ids[] = {
static u32 max_subwindow_count;
/* Pool for fspi allocation */
struct gen_pool *spaace_pool;
static struct gen_pool *spaace_pool;
/**
* pamu_get_max_subwin_cnt() - Return the maximum supported
@ -170,7 +160,7 @@ int pamu_disable_liodn(int liodn)
static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
{
/* Bug if not a power of 2 */
BUG_ON((addrspace_size & (addrspace_size - 1)));
BUG_ON(addrspace_size & (addrspace_size - 1));
/* window size is 2^(WSE+1) bytes */
return fls64(addrspace_size) - 2;
@ -179,8 +169,8 @@ static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
/* Derive the PAACE window count encoding for the subwindow count */
static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
{
/* window count is 2^(WCE+1) bytes */
return __ffs(subwindow_cnt) - 1;
/* window count is 2^(WCE+1) bytes */
return __ffs(subwindow_cnt) - 1;
}
/*
@ -241,7 +231,7 @@ static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
* If no SPAACE entry is available or the allocator can not reserve the required
* number of contiguous entries function returns ULONG_MAX indicating a failure.
*
*/
*/
static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
{
unsigned long spaace_addr;
@ -288,9 +278,8 @@ int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
}
if (subwin) {
paace = pamu_get_spaace(paace, subwin - 1);
if (!paace) {
if (!paace)
return -ENOENT;
}
}
set_bf(paace->impl_attr, PAACE_IA_CID, value);
@ -311,14 +300,12 @@ int pamu_disable_spaace(int liodn, u32 subwin)
}
if (subwin) {
paace = pamu_get_spaace(paace, subwin - 1);
if (!paace) {
if (!paace)
return -ENOENT;
}
set_bf(paace->addr_bitfields, PAACE_AF_V,
PAACE_V_INVALID);
set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
} else {
set_bf(paace->addr_bitfields, PAACE_AF_AP,
PAACE_AP_PERMS_DENIED);
PAACE_AP_PERMS_DENIED);
}
mb();
@ -326,7 +313,6 @@ int pamu_disable_spaace(int liodn, u32 subwin)
return 0;
}
/**
* pamu_config_paace() - Sets up PPAACE entry for specified liodn
*
@ -352,7 +338,8 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
unsigned long fspi;
if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
pr_debug("window size too small or not a power of two %llx\n", win_size);
pr_debug("window size too small or not a power of two %pa\n",
&win_size);
return -EINVAL;
}
@ -362,13 +349,12 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
}
ppaace = pamu_get_ppaace(liodn);
if (!ppaace) {
if (!ppaace)
return -ENOENT;
}
/* window size is 2^(WSE+1) bytes */
set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
map_addrspace_size_to_wse(win_size));
map_addrspace_size_to_wse(win_size));
pamu_init_ppaace(ppaace);
@ -442,7 +428,6 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
{
struct paace *paace;
/* setup sub-windows */
if (!subwin_cnt) {
pr_debug("Invalid subwindow count\n");
@ -510,11 +495,11 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
}
/**
* get_ome_index() - Returns the index in the operation mapping table
* for device.
* @*omi_index: pointer for storing the index value
*
*/
* get_ome_index() - Returns the index in the operation mapping table
* for device.
* @*omi_index: pointer for storing the index value
*
*/
void get_ome_index(u32 *omi_index, struct device *dev)
{
if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
@ -544,9 +529,10 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
node = of_find_matching_node(NULL, l3_device_ids);
if (node) {
prop = of_get_property(node, "cache-stash-id", 0);
prop = of_get_property(node, "cache-stash-id", NULL);
if (!prop) {
pr_debug("missing cache-stash-id at %s\n", node->full_name);
pr_debug("missing cache-stash-id at %s\n",
node->full_name);
of_node_put(node);
return ~(u32)0;
}
@ -570,9 +556,10 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
/* find the hwnode that represents the cache */
for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
if (stash_dest_hint == cache_level) {
prop = of_get_property(node, "cache-stash-id", 0);
prop = of_get_property(node, "cache-stash-id", NULL);
if (!prop) {
pr_debug("missing cache-stash-id at %s\n", node->full_name);
pr_debug("missing cache-stash-id at %s\n",
node->full_name);
of_node_put(node);
return ~(u32)0;
}
@ -580,10 +567,10 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
return be32_to_cpup(prop);
}
prop = of_get_property(node, "next-level-cache", 0);
prop = of_get_property(node, "next-level-cache", NULL);
if (!prop) {
pr_debug("can't find next-level-cache at %s\n",
node->full_name);
node->full_name);
of_node_put(node);
return ~(u32)0; /* can't traverse any further */
}
@ -598,7 +585,7 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
}
pr_debug("stash dest not found for %d on vcpu %d\n",
stash_dest_hint, vcpu);
stash_dest_hint, vcpu);
return ~(u32)0;
}
@ -612,7 +599,7 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
* Memory accesses to QMAN and BMAN private memory need not be coherent, so
* clear the PAACE entry coherency attribute for them.
*/
static void setup_qbman_paace(struct paace *ppaace, int paace_type)
static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
{
switch (paace_type) {
case QMAN_PAACE:
@ -626,7 +613,7 @@ static void setup_qbman_paace(struct paace *ppaace, int paace_type)
case QMAN_PORTAL_PAACE:
set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
ppaace->op_encode.index_ot.omi = OMI_QMAN;
/*Set DQRR and Frame stashing for the L3 cache */
/* Set DQRR and Frame stashing for the L3 cache */
set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
break;
case BMAN_PAACE:
@ -679,7 +666,7 @@ static void __init setup_omt(struct ome *omt)
* Get the maximum number of PAACT table entries
* and subwindows supported by PAMU
*/
static void get_pamu_cap_values(unsigned long pamu_reg_base)
static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
{
u32 pc_val;
@ -689,9 +676,9 @@ static void get_pamu_cap_values(unsigned long pamu_reg_base)
}
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
phys_addr_t omt_phys)
static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
phys_addr_t omt_phys)
{
u32 *pc;
struct pamu_mmap_regs *pamu_regs;
@ -727,7 +714,7 @@ int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
*/
out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
PAMU_ACCESS_VIOLATION_ENABLE);
PAMU_ACCESS_VIOLATION_ENABLE);
out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
return 0;
}
@ -757,9 +744,9 @@ static void __init setup_liodns(void)
ppaace->wbah = 0;
set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
set_bf(ppaace->impl_attr, PAACE_IA_ATM,
PAACE_ATM_NO_XLATE);
PAACE_ATM_NO_XLATE);
set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
PAACE_AP_PERMS_ALL);
PAACE_AP_PERMS_ALL);
if (of_device_is_compatible(node, "fsl,qman-portal"))
setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
if (of_device_is_compatible(node, "fsl,qman"))
@ -772,7 +759,7 @@ static void __init setup_liodns(void)
}
}
irqreturn_t pamu_av_isr(int irq, void *arg)
static irqreturn_t pamu_av_isr(int irq, void *arg)
{
struct pamu_isr_data *data = arg;
phys_addr_t phys;
@ -792,14 +779,16 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
pr_emerg("AVS1=%08x\n", avs1);
pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
pr_emerg("AVA=%016llx\n", make64(in_be32(p + PAMU_AVAH),
in_be32(p + PAMU_AVAL)));
pr_emerg("AVA=%016llx\n",
make64(in_be32(p + PAMU_AVAH),
in_be32(p + PAMU_AVAL)));
pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
pr_emerg("POEA=%016llx\n", make64(in_be32(p + PAMU_POEAH),
in_be32(p + PAMU_POEAL)));
pr_emerg("POEA=%016llx\n",
make64(in_be32(p + PAMU_POEAH),
in_be32(p + PAMU_POEAL)));
phys = make64(in_be32(p + PAMU_POEAH),
in_be32(p + PAMU_POEAL));
in_be32(p + PAMU_POEAL));
/* Assume that POEA points to a PAACE */
if (phys) {
@ -807,11 +796,12 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
/* Only the first four words are relevant */
for (j = 0; j < 4; j++)
pr_emerg("PAACE[%u]=%08x\n", j, in_be32(paace + j));
pr_emerg("PAACE[%u]=%08x\n",
j, in_be32(paace + j));
}
/* clear access violation condition */
out_be32((p + PAMU_AVS1), avs1 & PAMU_AV_MASK);
out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK);
paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
BUG_ON(!paace);
/* check if we got a violation for a disabled LIODN */
@ -827,13 +817,13 @@ irqreturn_t pamu_av_isr(int irq, void *arg)
/* Disable the LIODN */
ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
BUG_ON(ret);
pr_emerg("Disabling liodn %x\n", avs1 >> PAMU_AVS1_LIODN_SHIFT);
pr_emerg("Disabling liodn %x\n",
avs1 >> PAMU_AVS1_LIODN_SHIFT);
}
out_be32((p + PAMU_PICS), pics);
}
}
return IRQ_HANDLED;
}
@ -952,7 +942,7 @@ static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
}
if (i == 0 || i == num_laws) {
/* This should never happen*/
/* This should never happen */
ret = -ENOENT;
goto error;
}
@ -998,26 +988,27 @@ static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
static const struct {
u32 svr;
u32 port_id;
} port_id_map[] = {
{0x82100010, 0xFF000000}, /* P2040 1.0 */
{0x82100011, 0xFF000000}, /* P2040 1.1 */
{0x82100110, 0xFF000000}, /* P2041 1.0 */
{0x82100111, 0xFF000000}, /* P2041 1.1 */
{0x82110310, 0xFF000000}, /* P3041 1.0 */
{0x82110311, 0xFF000000}, /* P3041 1.1 */
{0x82010020, 0xFFF80000}, /* P4040 2.0 */
{0x82000020, 0xFFF80000}, /* P4080 2.0 */
{0x82210010, 0xFC000000}, /* P5010 1.0 */
{0x82210020, 0xFC000000}, /* P5010 2.0 */
{0x82200010, 0xFC000000}, /* P5020 1.0 */
{0x82050010, 0xFF800000}, /* P5021 1.0 */
{0x82040010, 0xFF800000}, /* P5040 1.0 */
} port_id_map[] __initconst = {
{(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */
{(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */
{(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */
{(SVR_P2041 << 8) | 0x11, 0xFF000000}, /* P2041 1.1 */
{(SVR_P3041 << 8) | 0x10, 0xFF000000}, /* P3041 1.0 */
{(SVR_P3041 << 8) | 0x11, 0xFF000000}, /* P3041 1.1 */
{(SVR_P4040 << 8) | 0x20, 0xFFF80000}, /* P4040 2.0 */
{(SVR_P4080 << 8) | 0x20, 0xFFF80000}, /* P4080 2.0 */
{(SVR_P5010 << 8) | 0x10, 0xFC000000}, /* P5010 1.0 */
{(SVR_P5010 << 8) | 0x20, 0xFC000000}, /* P5010 2.0 */
{(SVR_P5020 << 8) | 0x10, 0xFC000000}, /* P5020 1.0 */
{(SVR_P5021 << 8) | 0x10, 0xFF800000}, /* P5021 1.0 */
{(SVR_P5040 << 8) | 0x10, 0xFF800000}, /* P5040 1.0 */
};
#define SVR_SECURITY 0x80000 /* The Security (E) bit */
static int __init fsl_pamu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
void __iomem *pamu_regs = NULL;
struct ccsr_guts __iomem *guts_regs = NULL;
u32 pamubypenr, pamu_counter;
@ -1042,22 +1033,21 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
* NOTE : All PAMUs share the same LIODN tables.
*/
pamu_regs = of_iomap(pdev->dev.of_node, 0);
pamu_regs = of_iomap(dev->of_node, 0);
if (!pamu_regs) {
dev_err(&pdev->dev, "ioremap of PAMU node failed\n");
dev_err(dev, "ioremap of PAMU node failed\n");
return -ENOMEM;
}
of_get_address(pdev->dev.of_node, 0, &size, NULL);
of_get_address(dev->of_node, 0, &size, NULL);
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
irq = irq_of_parse_and_map(dev->of_node, 0);
if (irq == NO_IRQ) {
dev_warn(&pdev->dev, "no interrupts listed in PAMU node\n");
dev_warn(dev, "no interrupts listed in PAMU node\n");
goto error;
}
data = kzalloc(sizeof(struct pamu_isr_data), GFP_KERNEL);
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
dev_err(&pdev->dev, "PAMU isr data memory allocation failed\n");
ret = -ENOMEM;
goto error;
}
@ -1067,15 +1057,14 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
/* The ISR needs access to the regs, so we won't iounmap them */
ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
if (ret < 0) {
dev_err(&pdev->dev, "error %i installing ISR for irq %i\n",
ret, irq);
dev_err(dev, "error %i installing ISR for irq %i\n", ret, irq);
goto error;
}
guts_node = of_find_matching_node(NULL, guts_device_ids);
if (!guts_node) {
dev_err(&pdev->dev, "could not find GUTS node %s\n",
pdev->dev.of_node->full_name);
dev_err(dev, "could not find GUTS node %s\n",
dev->of_node->full_name);
ret = -ENODEV;
goto error;
}
@ -1083,7 +1072,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
guts_regs = of_iomap(guts_node, 0);
of_node_put(guts_node);
if (!guts_regs) {
dev_err(&pdev->dev, "ioremap of GUTS node failed\n");
dev_err(dev, "ioremap of GUTS node failed\n");
ret = -ENODEV;
goto error;
}
@ -1103,7 +1092,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!p) {
dev_err(&pdev->dev, "unable to allocate PAACT/SPAACT/OMT block\n");
dev_err(dev, "unable to allocate PAACT/SPAACT/OMT block\n");
ret = -ENOMEM;
goto error;
}
@ -1113,7 +1102,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
/* Make sure the memory is naturally aligned */
if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
dev_err(&pdev->dev, "PAACT/OMT block is unaligned\n");
dev_err(dev, "PAACT/OMT block is unaligned\n");
ret = -ENOMEM;
goto error;
}
@ -1121,8 +1110,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
dev_dbg(&pdev->dev, "ppaact virt=%p phys=0x%llx\n", ppaact,
(unsigned long long) ppaact_phys);
dev_dbg(dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys);
/* Check to see if we need to implement the work-around on this SOC */
@ -1130,21 +1118,19 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
csd_port_id = port_id_map[i].port_id;
dev_dbg(&pdev->dev, "found matching SVR %08x\n",
dev_dbg(dev, "found matching SVR %08x\n",
port_id_map[i].svr);
break;
}
}
if (csd_port_id) {
dev_dbg(&pdev->dev, "creating coherency subdomain at address "
"0x%llx, size %zu, port id 0x%08x", ppaact_phys,
mem_size, csd_port_id);
dev_dbg(dev, "creating coherency subdomain at address %pa, size %zu, port id 0x%08x",
&ppaact_phys, mem_size, csd_port_id);
ret = create_csd(ppaact_phys, mem_size, csd_port_id);
if (ret) {
dev_err(&pdev->dev, "could not create coherence "
"subdomain\n");
dev_err(dev, "could not create coherence subdomain\n");
return ret;
}
}
@ -1155,7 +1141,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
if (!spaace_pool) {
ret = -ENOMEM;
dev_err(&pdev->dev, "PAMU : failed to allocate spaace gen pool\n");
dev_err(dev, "Failed to allocate spaace gen pool\n");
goto error;
}
@ -1168,9 +1154,9 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
pamu_reg_base = (unsigned long) pamu_regs + pamu_reg_off;
pamu_reg_base = (unsigned long)pamu_regs + pamu_reg_off;
setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
spaact_phys, omt_phys);
spaact_phys, omt_phys);
/* Disable PAMU bypass for this PAMU */
pamubypenr &= ~pamu_counter;
}
@ -1182,7 +1168,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
iounmap(guts_regs);
/* Enable DMA for the LIODNs in the device tree*/
/* Enable DMA for the LIODNs in the device tree */
setup_liodns();
@ -1214,17 +1200,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
return ret;
}
static const struct of_device_id fsl_of_pamu_ids[] = {
{
.compatible = "fsl,p4080-pamu",
},
{
.compatible = "fsl,pamu",
},
{},
};
static struct platform_driver fsl_of_pamu_driver = {
static struct platform_driver fsl_of_pamu_driver __initdata = {
.driver = {
.name = "fsl-of-pamu",
},


@ -19,13 +19,15 @@
#ifndef __FSL_PAMU_H
#define __FSL_PAMU_H
#include <linux/iommu.h>
#include <asm/fsl_pamu_stash.h>
/* Bit Field macros
* v = bit field variable; m = mask, m##_SHIFT = shift, x = value to load
*/
#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << (m##_SHIFT)) & (m)))
#define get_bf(v, m) (((v) & (m)) >> (m##_SHIFT))
#define set_bf(v, m, x) (v = ((v) & ~(m)) | (((x) << m##_SHIFT) & (m)))
#define get_bf(v, m) (((v) & (m)) >> m##_SHIFT)
/* PAMU CCSR space */
#define PAMU_PGC 0x00000000 /* Allows all peripheral accesses */
@ -65,7 +67,7 @@ struct pamu_mmap_regs {
#define PAMU_AVS1_GCV 0x2000
#define PAMU_AVS1_PDV 0x4000
#define PAMU_AV_MASK (PAMU_AVS1_AV | PAMU_AVS1_OTV | PAMU_AVS1_APV | PAMU_AVS1_WAV \
| PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
| PAMU_AVS1_LAV | PAMU_AVS1_GCV | PAMU_AVS1_PDV)
#define PAMU_AVS1_LIODN_SHIFT 16
#define PAMU_LAV_LIODN_NOT_IN_PPAACT 0x400
@ -198,8 +200,7 @@ struct pamu_mmap_regs {
#define PAACE_ATM_NO_XLATE 0x00
#define PAACE_ATM_WINDOW_XLATE 0x01
#define PAACE_ATM_PAGE_XLATE 0x02
#define PAACE_ATM_WIN_PG_XLATE \
(PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
#define PAACE_ATM_WIN_PG_XLATE (PAACE_ATM_WINDOW_XLATE | PAACE_ATM_PAGE_XLATE)
#define PAACE_OTM_NO_XLATE 0x00
#define PAACE_OTM_IMMEDIATE 0x01
#define PAACE_OTM_INDEXED 0x02
@ -219,7 +220,7 @@ struct pamu_mmap_regs {
#define PAACE_TCEF_FORMAT0_8B 0x00
#define PAACE_TCEF_FORMAT1_RSVD 0x01
/*
* Hard coded value for the PAACT size to accomodate
* Hard coded value for the PAACT size to accommodate
* maximum LIODN value generated by u-boot.
*/
#define PAACE_NUMBER_ENTRIES 0x500
@ -332,7 +333,7 @@ struct paace {
#define NUM_MOE 128
struct ome {
u8 moe[NUM_MOE];
} __attribute__((packed));
} __packed;
#define PAACT_SIZE (sizeof(struct paace) * PAACE_NUMBER_ENTRIES)
#define SPAACT_SIZE (sizeof(struct paace) * SPAACE_NUMBER_ENTRIES)


@ -19,26 +19,10 @@
#define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
#include <linux/init.h>
#include <linux/iommu.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/of_platform.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <asm/io.h>
#include <asm/bitops.h>
#include <asm/pci-bridge.h>
#include <sysdev/fsl_pci.h>
#include "fsl_pamu_domain.h"
#include <sysdev/fsl_pci.h>
/*
* Global spinlock that needs to be held while
* configuring PAMU.
@ -51,23 +35,21 @@ static DEFINE_SPINLOCK(device_domain_lock);
static int __init iommu_init_mempool(void)
{
fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
sizeof(struct fsl_dma_domain),
0,
SLAB_HWCACHE_ALIGN,
NULL);
sizeof(struct fsl_dma_domain),
0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!fsl_pamu_domain_cache) {
pr_debug("Couldn't create fsl iommu_domain cache\n");
return -ENOMEM;
}
iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
sizeof(struct device_domain_info),
0,
SLAB_HWCACHE_ALIGN,
NULL);
sizeof(struct device_domain_info),
0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!iommu_devinfo_cache) {
pr_debug("Couldn't create devinfo cache\n");
kmem_cache_destroy(fsl_pamu_domain_cache);
@ -80,8 +62,7 @@ static int __init iommu_init_mempool(void)
static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
{
u32 win_cnt = dma_domain->win_cnt;
struct dma_window *win_ptr =
&dma_domain->win_arr[0];
struct dma_window *win_ptr = &dma_domain->win_arr[0];
struct iommu_domain_geometry *geom;
geom = &dma_domain->iommu_domain->geometry;
@ -103,22 +84,20 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i
}
if (win_ptr->valid)
return (win_ptr->paddr + (iova & (win_ptr->size - 1)));
return win_ptr->paddr + (iova & (win_ptr->size - 1));
return 0;
}
static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
{
struct dma_window *sub_win_ptr =
&dma_domain->win_arr[0];
struct dma_window *sub_win_ptr = &dma_domain->win_arr[0];
int i, ret;
unsigned long rpn, flags;
for (i = 0; i < dma_domain->win_cnt; i++) {
if (sub_win_ptr[i].valid) {
rpn = sub_win_ptr[i].paddr >>
PAMU_PAGE_SHIFT;
rpn = sub_win_ptr[i].paddr >> PAMU_PAGE_SHIFT;
spin_lock_irqsave(&iommu_lock, flags);
ret = pamu_config_spaace(liodn, dma_domain->win_cnt, i,
sub_win_ptr[i].size,
@ -130,7 +109,7 @@ static int map_subwins(int liodn, struct fsl_dma_domain *dma_domain)
sub_win_ptr[i].prot);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
pr_debug("PAMU SPAACE configuration failed for liodn %d\n",
pr_debug("SPAACE configuration failed for liodn %d\n",
liodn);
return ret;
}
@ -156,8 +135,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
0, wnd->prot);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret)
pr_debug("PAMU PAACE configuration failed for liodn %d\n",
liodn);
pr_debug("PAACE configuration failed for liodn %d\n", liodn);
return ret;
}
@ -169,7 +147,6 @@ static int map_liodn(int liodn, struct fsl_dma_domain *dma_domain)
return map_subwins(liodn, dma_domain);
else
return map_win(liodn, dma_domain);
}
/* Update window/subwindow mapping for the LIODN */
@ -190,7 +167,8 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
(wnd_nr > 0) ? 1 : 0,
wnd->prot);
if (ret)
pr_debug("Subwindow reconfiguration failed for liodn %d\n", liodn);
pr_debug("Subwindow reconfiguration failed for liodn %d\n",
liodn);
} else {
phys_addr_t wnd_addr;
@ -200,10 +178,11 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
wnd->size,
~(u32)0,
wnd->paddr >> PAMU_PAGE_SHIFT,
dma_domain->snoop_id, dma_domain->stash_id,
0, wnd->prot);
dma_domain->snoop_id, dma_domain->stash_id,
0, wnd->prot);
if (ret)
pr_debug("Window reconfiguration failed for liodn %d\n", liodn);
pr_debug("Window reconfiguration failed for liodn %d\n",
liodn);
}
spin_unlock_irqrestore(&iommu_lock, flags);
@ -212,14 +191,15 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
}
static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
u32 val)
u32 val)
{
int ret = 0, i;
unsigned long flags;
spin_lock_irqsave(&iommu_lock, flags);
if (!dma_domain->win_arr) {
pr_debug("Windows not configured, stash destination update failed for liodn %d\n", liodn);
pr_debug("Windows not configured, stash destination update failed for liodn %d\n",
liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return -EINVAL;
}
@ -227,7 +207,8 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
for (i = 0; i < dma_domain->win_cnt; i++) {
ret = pamu_update_paace_stash(liodn, i, val);
if (ret) {
pr_debug("Failed to update SPAACE %d field for liodn %d\n ", i, liodn);
pr_debug("Failed to update SPAACE %d field for liodn %d\n ",
i, liodn);
spin_unlock_irqrestore(&iommu_lock, flags);
return ret;
}
@ -240,9 +221,9 @@ static int update_liodn_stash(int liodn, struct fsl_dma_domain *dma_domain,
/* Set the geometry parameters for a LIODN */
static int pamu_set_liodn(int liodn, struct device *dev,
struct fsl_dma_domain *dma_domain,
struct iommu_domain_geometry *geom_attr,
u32 win_cnt)
struct fsl_dma_domain *dma_domain,
struct iommu_domain_geometry *geom_attr,
u32 win_cnt)
{
phys_addr_t window_addr, window_size;
phys_addr_t subwin_size;
@ -268,7 +249,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
dma_domain->stash_id, win_cnt, 0);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
pr_debug("PAMU PAACE configuration failed for liodn %d, win_cnt =%d\n", liodn, win_cnt);
pr_debug("PAACE configuration failed for liodn %d, win_cnt =%d\n",
liodn, win_cnt);
return ret;
}
@ -285,7 +267,8 @@ static int pamu_set_liodn(int liodn, struct device *dev,
0, 0);
spin_unlock_irqrestore(&iommu_lock, flags);
if (ret) {
pr_debug("PAMU SPAACE configuration failed for liodn %d\n", liodn);
pr_debug("SPAACE configuration failed for liodn %d\n",
liodn);
return ret;
}
}
@ -301,13 +284,13 @@ static int check_size(u64 size, dma_addr_t iova)
* to PAMU page size.
*/
if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
pr_debug("%s: size too small or not a power of two\n", __func__);
pr_debug("Size too small or not a power of two\n");
return -EINVAL;
}
/* iova must be page size aligned*/
/* iova must be page size aligned */
if (iova & (size - 1)) {
pr_debug("%s: address is not aligned with window size\n", __func__);
pr_debug("Address is not aligned with window size\n");
return -EINVAL;
}
@ -396,16 +379,15 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
if (!dev->archdata.iommu_domain)
dev->archdata.iommu_domain = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
}
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
dma_addr_t iova)
{
struct fsl_dma_domain *dma_domain = domain->priv;
if ((iova < domain->geometry.aperture_start) ||
iova > (domain->geometry.aperture_end))
if (iova < domain->geometry.aperture_start ||
iova > domain->geometry.aperture_end)
return 0;
return get_phys_addr(dma_domain, iova);
@ -460,7 +442,7 @@ static int pamu_set_domain_geometry(struct fsl_dma_domain *dma_domain,
list_for_each_entry(info, &dma_domain->devices, link) {
ret = pamu_set_liodn(info->liodn, info->dev, dma_domain,
geom_attr, win_cnt);
geom_attr, win_cnt);
if (ret)
break;
}
@ -543,7 +525,6 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
}
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
}
static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@ -576,7 +557,7 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
win_size = dma_domain->geom_size >> ilog2(dma_domain->win_cnt);
if (size > win_size) {
pr_debug("Invalid window size \n");
pr_debug("Invalid window size\n");
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return -EINVAL;
}
@ -622,8 +603,8 @@ static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
* and window mappings.
*/
static int handle_attach_device(struct fsl_dma_domain *dma_domain,
struct device *dev, const u32 *liodn,
int num)
struct device *dev, const u32 *liodn,
int num)
{
unsigned long flags;
struct iommu_domain *domain = dma_domain->iommu_domain;
@ -632,11 +613,10 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
spin_lock_irqsave(&dma_domain->domain_lock, flags);
for (i = 0; i < num; i++) {
/* Ensure that LIODN value is valid */
if (liodn[i] >= PAACE_NUMBER_ENTRIES) {
pr_debug("Invalid liodn %d, attach device failed for %s\n",
liodn[i], dev->of_node->full_name);
liodn[i], dev->of_node->full_name);
ret = -EINVAL;
break;
}
@ -649,9 +629,9 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
*/
if (dma_domain->win_arr) {
u32 win_cnt = dma_domain->win_cnt > 1 ? dma_domain->win_cnt : 0;
ret = pamu_set_liodn(liodn[i], dev, dma_domain,
&domain->geometry,
win_cnt);
&domain->geometry, win_cnt);
if (ret)
break;
if (dma_domain->mapped) {
@ -698,19 +678,18 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
liodn = of_get_property(dev->of_node, "fsl,liodn", &len);
if (liodn) {
liodn_cnt = len / sizeof(u32);
ret = handle_attach_device(dma_domain, dev,
liodn, liodn_cnt);
ret = handle_attach_device(dma_domain, dev, liodn, liodn_cnt);
} else {
pr_debug("missing fsl,liodn property at %s\n",
dev->of_node->full_name);
ret = -EINVAL;
dev->of_node->full_name);
ret = -EINVAL;
}
return ret;
}
static void fsl_pamu_detach_device(struct iommu_domain *domain,
struct device *dev)
struct device *dev)
{
struct fsl_dma_domain *dma_domain = domain->priv;
const u32 *prop;
@ -738,7 +717,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
detach_device(dev, dma_domain);
else
pr_debug("missing fsl,liodn property at %s\n",
dev->of_node->full_name);
dev->of_node->full_name);
}
static int configure_domain_geometry(struct iommu_domain *domain, void *data)
@ -754,10 +733,10 @@ static int configure_domain_geometry(struct iommu_domain *domain, void *data)
* DMA outside of the geometry.
*/
if (check_size(geom_size, geom_attr->aperture_start) ||
!geom_attr->force_aperture) {
pr_debug("Invalid PAMU geometry attributes\n");
return -EINVAL;
}
!geom_attr->force_aperture) {
pr_debug("Invalid PAMU geometry attributes\n");
return -EINVAL;
}
spin_lock_irqsave(&dma_domain->domain_lock, flags);
if (dma_domain->enabled) {
@ -786,7 +765,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
spin_lock_irqsave(&dma_domain->domain_lock, flags);
memcpy(&dma_domain->dma_stash, stash_attr,
sizeof(struct pamu_stash_attribute));
sizeof(struct pamu_stash_attribute));
dma_domain->stash_id = get_stash_id(stash_attr->cache,
stash_attr->cpu);
@ -803,7 +782,7 @@ static int configure_domain_stash(struct fsl_dma_domain *dma_domain, void *data)
return ret;
}
/* Configure domain dma state i.e. enable/disable DMA*/
/* Configure domain dma state i.e. enable/disable DMA */
static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool enable)
{
struct device_domain_info *info;
@ -819,8 +798,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
}
dma_domain->enabled = enable;
list_for_each_entry(info, &dma_domain->devices,
link) {
list_for_each_entry(info, &dma_domain->devices, link) {
ret = (enable) ? pamu_enable_liodn(info->liodn) :
pamu_disable_liodn(info->liodn);
if (ret)
@ -833,12 +811,11 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
}
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
enum iommu_attr attr_type, void *data)
enum iommu_attr attr_type, void *data)
{
struct fsl_dma_domain *dma_domain = domain->priv;
int ret = 0;
switch (attr_type) {
case DOMAIN_ATTR_GEOMETRY:
ret = configure_domain_geometry(domain, data);
@ -853,22 +830,21 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
break;
};
}
return ret;
}
static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
enum iommu_attr attr_type, void *data)
enum iommu_attr attr_type, void *data)
{
struct fsl_dma_domain *dma_domain = domain->priv;
int ret = 0;
switch (attr_type) {
case DOMAIN_ATTR_FSL_PAMU_STASH:
memcpy((struct pamu_stash_attribute *) data, &dma_domain->dma_stash,
sizeof(struct pamu_stash_attribute));
memcpy(data, &dma_domain->dma_stash,
sizeof(struct pamu_stash_attribute));
break;
case DOMAIN_ATTR_FSL_PAMU_ENABLE:
*(int *)data = dma_domain->enabled;
@ -880,7 +856,7 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
break;
};
}
return ret;
}
@ -903,11 +879,8 @@ static bool check_pci_ctl_endpt_part(struct pci_controller *pci_ctl)
/* Check the PCI controller version number by readding BRR1 register */
version = in_be32(pci_ctl->cfg_addr + (PCI_FSL_BRR1 >> 2));
version &= PCI_FSL_BRR1_VER;
/* If PCI controller version is >= 0x204 we can partition endpoints*/
if (version >= 0x204)
return 1;
return 0;
/* If PCI controller version is >= 0x204 we can partition endpoints */
return version >= 0x204;
}
/* Get iommu group information from peer devices or devices on the parent bus */
@ -968,8 +941,9 @@ static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
if (pci_ctl->parent->iommu_group) {
group = get_device_iommu_group(pci_ctl->parent);
iommu_group_remove_device(pci_ctl->parent);
} else
} else {
group = get_shared_pci_device_group(pdev);
}
}
if (!group)
@ -1055,11 +1029,12 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
}
ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
((w_count > 1) ? w_count : 0));
w_count > 1 ? w_count : 0);
if (!ret) {
kfree(dma_domain->win_arr);
dma_domain->win_arr = kzalloc(sizeof(struct dma_window) *
w_count, GFP_ATOMIC);
dma_domain->win_arr = kcalloc(w_count,
sizeof(*dma_domain->win_arr),
GFP_ATOMIC);
if (!dma_domain->win_arr) {
spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
return -ENOMEM;
@ -1095,7 +1070,7 @@ static const struct iommu_ops fsl_pamu_ops = {
.remove_device = fsl_pamu_remove_device,
};
int pamu_domain_init(void)
int __init pamu_domain_init(void)
{
int ret = 0;


@ -71,6 +71,9 @@
__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
/* IO virtual address start page frame number */
#define IOVA_START_PFN (1)
#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
#define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
@ -485,7 +488,6 @@ __setup("intel_iommu=", intel_iommu_setup);
static struct kmem_cache *iommu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static struct kmem_cache *iommu_iova_cache;
static inline void *alloc_pgtable_page(int node)
{
@ -523,16 +525,6 @@ static inline void free_devinfo_mem(void *vaddr)
kmem_cache_free(iommu_devinfo_cache, vaddr);
}
struct iova *alloc_iova_mem(void)
{
return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}
void free_iova_mem(struct iova *iova)
{
kmem_cache_free(iommu_iova_cache, iova);
}
static inline int domain_type_is_vm(struct dmar_domain *domain)
{
return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
@ -1643,7 +1635,8 @@ static int dmar_init_reserved_ranges(void)
struct iova *iova;
int i;
init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
DMA_32BIT_PFN);
lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
&reserved_rbtree_key);
@ -1701,7 +1694,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
int adjust_width, agaw;
unsigned long sagaw;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
DMA_32BIT_PFN);
domain_reserve_special_ranges(domain);
/* calculate AGAW */
@ -3427,23 +3421,6 @@ static inline int iommu_devinfo_cache_init(void)
return ret;
}
static inline int iommu_iova_cache_init(void)
{
int ret = 0;
iommu_iova_cache = kmem_cache_create("iommu_iova",
sizeof(struct iova),
0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!iommu_iova_cache) {
printk(KERN_ERR "Couldn't create iova cache\n");
ret = -ENOMEM;
}
return ret;
}
static int __init iommu_init_mempool(void)
{
int ret;
@ -3461,7 +3438,7 @@ static int __init iommu_init_mempool(void)
kmem_cache_destroy(iommu_domain_cache);
domain_error:
kmem_cache_destroy(iommu_iova_cache);
iommu_iova_cache_destroy();
return -ENOMEM;
}
@ -3470,8 +3447,7 @@ static void __init iommu_exit_mempool(void)
{
kmem_cache_destroy(iommu_devinfo_cache);
kmem_cache_destroy(iommu_domain_cache);
kmem_cache_destroy(iommu_iova_cache);
iommu_iova_cache_destroy();
}
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
@ -4342,7 +4318,8 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
int adjust_width;
init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
DMA_32BIT_PFN);
domain_reserve_special_ranges(domain);
/* calculate AGAW */


@ -0,0 +1,986 @@
/*
* CPU-agnostic ARM page table allocator.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) 2014 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
*/
#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "io-pgtable.h"
#define ARM_LPAE_MAX_ADDR_BITS 48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
#define ARM_LPAE_MAX_LEVELS 4
/* Struct accessors */
#define io_pgtable_to_data(x) \
container_of((x), struct arm_lpae_io_pgtable, iop)
#define io_pgtable_ops_to_pgtable(x) \
container_of((x), struct io_pgtable, ops)
#define io_pgtable_ops_to_data(x) \
io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
/*
* For consistency with the architecture, we always consider
* ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
*/
#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
/*
* Calculate the right shift amount to get to the portion describing level l
* in a virtual address mapped by the pagetable in d.
*/
#define ARM_LPAE_LVL_SHIFT(l,d) \
((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \
* (d)->bits_per_level) + (d)->pg_shift)
#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift)
/*
* Calculate the index at level l used to map virtual address a using the
* pagetable in d.
*/
#define ARM_LPAE_PGD_IDX(l,d) \
((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)
#define ARM_LPAE_LVL_IDX(a,l,d) \
(((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \
((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d) \
(1 << (ilog2(sizeof(arm_lpae_iopte)) + \
((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT 0
#define ARM_LPAE_PTE_TYPE_MASK 0x3
#define ARM_LPAE_PTE_TYPE_BLOCK 1
#define ARM_LPAE_PTE_TYPE_TABLE 3
#define ARM_LPAE_PTE_TYPE_PAGE 3
#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0)
#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
ARM_LPAE_PTE_ATTR_HI_MASK)
/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2)
/* Register bits */
#define ARM_32_LPAE_TCR_EAE (1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
#define ARM_LPAE_TCR_TG0_4K (0 << 14)
#define ARM_LPAE_TCR_TG0_64K (1 << 14)
#define ARM_LPAE_TCR_TG0_16K (2 << 14)
#define ARM_LPAE_TCR_SH0_SHIFT 12
#define ARM_LPAE_TCR_SH0_MASK 0x3
#define ARM_LPAE_TCR_SH_NS 0
#define ARM_LPAE_TCR_SH_OS 2
#define ARM_LPAE_TCR_SH_IS 3
#define ARM_LPAE_TCR_ORGN0_SHIFT 10
#define ARM_LPAE_TCR_IRGN0_SHIFT 8
#define ARM_LPAE_TCR_RGN_MASK 0x3
#define ARM_LPAE_TCR_RGN_NC 0
#define ARM_LPAE_TCR_RGN_WBWA 1
#define ARM_LPAE_TCR_RGN_WT 2
#define ARM_LPAE_TCR_RGN_WB 3
#define ARM_LPAE_TCR_SL0_SHIFT 6
#define ARM_LPAE_TCR_SL0_MASK 0x3
#define ARM_LPAE_TCR_T0SZ_SHIFT 0
#define ARM_LPAE_TCR_SZ_MASK 0xf
#define ARM_LPAE_TCR_PS_SHIFT 16
#define ARM_LPAE_TCR_PS_MASK 0x7
#define ARM_LPAE_TCR_IPS_SHIFT 32
#define ARM_LPAE_TCR_IPS_MASK 0x7
#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL
#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
#define ARM_LPAE_MAIR_ATTR_NC 0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
/* IOPTE accessors */
#define iopte_deref(pte,d) \
(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \
& ~((1ULL << (d)->pg_shift) - 1)))
#define iopte_type(pte,l) \
(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)
#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
#define iopte_leaf(pte,l) \
(l == (ARM_LPAE_MAX_LEVELS - 1) ? \
(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \
(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))
#define iopte_to_pfn(pte,d) \
(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)
#define pfn_to_iopte(pfn,d) \
(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
struct arm_lpae_io_pgtable {
struct io_pgtable iop;
int levels;
size_t pgd_size;
unsigned long pg_shift;
unsigned long bits_per_level;
void *pgd;
};
typedef u64 arm_lpae_iopte;
static bool selftest_running = false;
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
unsigned long iova, phys_addr_t paddr,
arm_lpae_iopte prot, int lvl,
arm_lpae_iopte *ptep)
{
arm_lpae_iopte pte = prot;
/* We require an unmap first */
if (iopte_leaf(*ptep, lvl)) {
WARN_ON(!selftest_running);
return -EEXIST;
}
if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NS;
if (lvl == ARM_LPAE_MAX_LEVELS - 1)
pte |= ARM_LPAE_PTE_TYPE_PAGE;
else
pte |= ARM_LPAE_PTE_TYPE_BLOCK;
pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
*ptep = pte;
data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
return 0;
}
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
int lvl, arm_lpae_iopte *ptep)
{
arm_lpae_iopte *cptep, pte;
void *cookie = data->iop.cookie;
size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
/* Find our entry at the current level */
ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
/* If we can install a leaf entry at this level, then do so */
if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
/* We can't allocate tables at the final level */
if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
return -EINVAL;
/* Grab a pointer to the next level */
pte = *ptep;
if (!pte) {
cptep = alloc_pages_exact(1UL << data->pg_shift,
GFP_ATOMIC | __GFP_ZERO);
if (!cptep)
return -ENOMEM;
data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
cookie);
pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
pte |= ARM_LPAE_PTE_NSTABLE;
*ptep = pte;
data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
} else {
cptep = iopte_deref(pte, data);
}
/* Rinse, repeat */
return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
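/*
 * Illustrative walk (editorial sketch, not in the original file): with a
 * 4K granule (pg_shift = 12, bits_per_level = 9) the block sizes at
 * levels 1/2/3 work out to 1G/2M/4K, matching the pgsize_bitmap
 * restriction applied in arm_lpae_restrict_pgsizes() below. Mapping 2M
 * therefore recurses from the start level until lvl == 2, where
 * size == block_size and arm_lpae_init_pte() installs a block descriptor;
 * mapping 4K recurses one level further and installs a page descriptor at
 * the final level.
 */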
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
int prot)
{
arm_lpae_iopte pte;
if (data->iop.fmt == ARM_64_LPAE_S1 ||
data->iop.fmt == ARM_32_LPAE_S1) {
pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pte |= ARM_LPAE_PTE_AP_RDONLY;
if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
} else {
pte = ARM_LPAE_PTE_HAP_FAULT;
if (prot & IOMMU_READ)
pte |= ARM_LPAE_PTE_HAP_READ;
if (prot & IOMMU_WRITE)
pte |= ARM_LPAE_PTE_HAP_WRITE;
if (prot & IOMMU_CACHE)
pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
else
pte |= ARM_LPAE_PTE_MEMATTR_NC;
}
if (prot & IOMMU_NOEXEC)
pte |= ARM_LPAE_PTE_XN;
return pte;
}
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int iommu_prot)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte *ptep = data->pgd;
int lvl = ARM_LPAE_START_LVL(data);
arm_lpae_iopte prot;
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
}
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
arm_lpae_iopte *ptep)
{
arm_lpae_iopte *start, *end;
unsigned long table_size;
/* Only leaf entries at the last level */
if (lvl == ARM_LPAE_MAX_LEVELS - 1)
return;
if (lvl == ARM_LPAE_START_LVL(data))
table_size = data->pgd_size;
else
table_size = 1UL << data->pg_shift;
start = ptep;
end = (void *)ptep + table_size;
while (ptep != end) {
arm_lpae_iopte pte = *ptep++;
if (!pte || iopte_leaf(pte, lvl))
continue;
__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
}
free_pages_exact(start, table_size);
}
static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
kfree(data);
}
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
unsigned long iova, size_t size,
arm_lpae_iopte prot, int lvl,
arm_lpae_iopte *ptep, size_t blk_size)
{
unsigned long blk_start, blk_end;
phys_addr_t blk_paddr;
arm_lpae_iopte table = 0;
void *cookie = data->iop.cookie;
const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
blk_start = iova & ~(blk_size - 1);
blk_end = blk_start + blk_size;
blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
arm_lpae_iopte *tablep;
/* Unmap! */
if (blk_start == iova)
continue;
/* __arm_lpae_map expects a pointer to the start of the table */
tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
tablep) < 0) {
if (table) {
/* Free the table we allocated */
tablep = iopte_deref(table, data);
__arm_lpae_free_pgtable(data, lvl + 1, tablep);
}
return 0; /* Bytes unmapped */
}
}
*ptep = table;
tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
iova &= ~(blk_size - 1);
tlb->tlb_add_flush(iova, blk_size, true, cookie);
return size;
}
static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
unsigned long iova, size_t size, int lvl,
arm_lpae_iopte *ptep)
{
arm_lpae_iopte pte;
const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
void *cookie = data->iop.cookie;
size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
pte = *ptep;
/* Something went horribly wrong and we ran out of page table */
if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
return 0;
/* If the size matches this level, we're in the right place */
if (size == blk_size) {
*ptep = 0;
tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
if (!iopte_leaf(pte, lvl)) {
/* Also flush any partial walks */
tlb->tlb_add_flush(iova, size, false, cookie);
tlb->tlb_sync(data->iop.cookie);
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
} else {
tlb->tlb_add_flush(iova, size, true, cookie);
}
return size;
} else if (iopte_leaf(pte, lvl)) {
/*
* Insert a table at the next level to map the old region,
* minus the part we want to unmap
*/
return arm_lpae_split_blk_unmap(data, iova, size,
iopte_prot(pte), lvl, ptep,
blk_size);
}
/* Keep on walkin' */
ptep = iopte_deref(pte, data);
return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}
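/*
 * Worked example (editorial sketch): unmapping one 4K page out of an
 * existing 2M block descriptor takes the arm_lpae_split_blk_unmap() path
 * above. The block is re-expressed as a next-level table covering the 511
 * pages that stay mapped, the page being removed is skipped, the new table
 * entry replaces the old block entry, and the TLB is invalidated for the
 * whole 2M range.
 */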
static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size)
{
size_t unmapped;
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable *iop = &data->iop;
arm_lpae_iopte *ptep = data->pgd;
int lvl = ARM_LPAE_START_LVL(data);
unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
if (unmapped)
iop->cfg.tlb->tlb_sync(iop->cookie);
return unmapped;
}
static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
unsigned long iova)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
arm_lpae_iopte pte, *ptep = data->pgd;
int lvl = ARM_LPAE_START_LVL(data);
do {
/* Valid IOPTE pointer? */
if (!ptep)
return 0;
/* Grab the IOPTE we're interested in */
pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));
/* Valid entry? */
if (!pte)
return 0;
/* Leaf entry? */
if (iopte_leaf(pte,lvl))
goto found_translation;
/* Take it to the next level */
ptep = iopte_deref(pte, data);
} while (++lvl < ARM_LPAE_MAX_LEVELS);
/* Ran out of page tables to walk */
return 0;
found_translation:
iova &= ((1 << data->pg_shift) - 1);
return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova;
}
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
unsigned long granule;
/*
* We need to restrict the supported page sizes to match the
* translation regime for a particular granule. Aim to match
* the CPU page size if possible, otherwise prefer smaller sizes.
* While we're at it, restrict the block sizes to match the
* chosen granule.
*/
if (cfg->pgsize_bitmap & PAGE_SIZE)
granule = PAGE_SIZE;
else if (cfg->pgsize_bitmap & ~PAGE_MASK)
granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
else if (cfg->pgsize_bitmap & PAGE_MASK)
granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
else
granule = 0;
switch (granule) {
case SZ_4K:
cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
break;
case SZ_16K:
cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
break;
case SZ_64K:
cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
break;
default:
cfg->pgsize_bitmap = 0;
}
}
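/*
 * Worked example (values assumed for illustration): with PAGE_SIZE == SZ_4K
 * and an incoming pgsize_bitmap of (SZ_4K | SZ_64K | SZ_2M | SZ_1G), the
 * CPU page size is available, so granule = SZ_4K and the bitmap is reduced
 * to (SZ_4K | SZ_2M | SZ_1G); the 64K page size is dropped because it
 * belongs to the 64K granule's translation regime rather than the 4K one.
 */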
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
unsigned long va_bits, pgd_bits;
struct arm_lpae_io_pgtable *data;
arm_lpae_restrict_pgsizes(cfg);
if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
return NULL;
if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
return NULL;
if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
return NULL;
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
data->pg_shift = __ffs(cfg->pgsize_bitmap);
data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));
va_bits = cfg->ias - data->pg_shift;
data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
/* Calculate the actual size of our pgd (without concatenation) */
pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));
data->iop.ops = (struct io_pgtable_ops) {
.map = arm_lpae_map,
.unmap = arm_lpae_unmap,
.iova_to_phys = arm_lpae_iova_to_phys,
};
return data;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
u64 reg;
struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
if (!data)
return NULL;
/* TCR */
reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
switch (1 << data->pg_shift) {
case SZ_4K:
reg |= ARM_LPAE_TCR_TG0_4K;
break;
case SZ_16K:
reg |= ARM_LPAE_TCR_TG0_16K;
break;
case SZ_64K:
reg |= ARM_LPAE_TCR_TG0_64K;
break;
}
switch (cfg->oas) {
case 32:
reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
break;
case 36:
reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
break;
case 40:
reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
break;
case 42:
reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
break;
case 44:
reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
break;
case 48:
reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
break;
default:
goto out_free_data;
}
reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
cfg->arm_lpae_s1_cfg.tcr = reg;
/* MAIRs */
reg = (ARM_LPAE_MAIR_ATTR_NC
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
(ARM_LPAE_MAIR_ATTR_WBRWA
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
(ARM_LPAE_MAIR_ATTR_DEVICE
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
cfg->arm_lpae_s1_cfg.mair[0] = reg;
cfg->arm_lpae_s1_cfg.mair[1] = 0;
/* Looking good; allocate a pgd */
data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
if (!data->pgd)
goto out_free_data;
cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
/* TTBRs */
cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
return &data->iop;
out_free_data:
kfree(data);
return NULL;
}
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
u64 reg, sl;
struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);
if (!data)
return NULL;
/*
* Concatenate PGDs at level 1 if possible in order to reduce
* the depth of the stage-2 walk.
*/
if (data->levels == ARM_LPAE_MAX_LEVELS) {
unsigned long pgd_pages;
pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
data->pgd_size = pgd_pages << data->pg_shift;
data->levels--;
}
}
/* VTCR */
reg = ARM_64_LPAE_S2_TCR_RES1 |
(ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
sl = ARM_LPAE_START_LVL(data);
switch (1 << data->pg_shift) {
case SZ_4K:
reg |= ARM_LPAE_TCR_TG0_4K;
sl++; /* SL0 format is different for 4K granule size */
break;
case SZ_16K:
reg |= ARM_LPAE_TCR_TG0_16K;
break;
case SZ_64K:
reg |= ARM_LPAE_TCR_TG0_64K;
break;
}
switch (cfg->oas) {
case 32:
reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
break;
case 36:
reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
break;
case 40:
reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
break;
case 42:
reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
break;
case 44:
reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
break;
case 48:
reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
break;
default:
goto out_free_data;
}
reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
cfg->arm_lpae_s2_cfg.vtcr = reg;
/* Allocate pgd pages */
data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
if (!data->pgd)
goto out_free_data;
cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
/* VTTBR */
cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
return &data->iop;
out_free_data:
kfree(data);
return NULL;
}
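/*
 * Worked example for the stage-2 concatenation above (illustrative,
 * assuming a 4K granule and a 40-bit IAS): va_bits = 40 - 12 = 28, so
 * DIV_ROUND_UP(28, 9) initially gives 4 levels and a 2-entry (16-byte)
 * pgd. That is well within the concatenation limit, so the two level-1
 * tables are concatenated instead: pgd_size becomes 2 * 4K, levels drops
 * to 3, and every stage-2 walk is one level shorter.
 */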
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
struct io_pgtable *iop;
if (cfg->ias > 32 || cfg->oas > 40)
return NULL;
cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
if (iop) {
cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
}
return iop;
}
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
struct io_pgtable *iop;
if (cfg->ias > 40 || cfg->oas > 40)
return NULL;
cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
if (iop)
cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;
return iop;
}
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
.alloc = arm_64_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable,
};
struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
.alloc = arm_64_lpae_alloc_pgtable_s2,
.free = arm_lpae_free_pgtable,
};
struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
.alloc = arm_32_lpae_alloc_pgtable_s1,
.free = arm_lpae_free_pgtable,
};
struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
.alloc = arm_32_lpae_alloc_pgtable_s2,
.free = arm_lpae_free_pgtable,
};
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
static struct io_pgtable_cfg *cfg_cookie;
static void dummy_tlb_flush_all(void *cookie)
{
WARN_ON(cookie != cfg_cookie);
}
static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
void *cookie)
{
WARN_ON(cookie != cfg_cookie);
WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}
static void dummy_tlb_sync(void *cookie)
{
WARN_ON(cookie != cfg_cookie);
}
static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
{
WARN_ON(cookie != cfg_cookie);
}
static struct iommu_gather_ops dummy_tlb_ops __initdata = {
.tlb_flush_all = dummy_tlb_flush_all,
.tlb_add_flush = dummy_tlb_add_flush,
.tlb_sync = dummy_tlb_sync,
.flush_pgtable = dummy_flush_pgtable,
};
static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
cfg->pgsize_bitmap, cfg->ias);
pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
data->levels, data->pgd_size, data->pg_shift,
data->bits_per_level, data->pgd);
}
#define __FAIL(ops, i) ({ \
WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
arm_lpae_dump_ops(ops); \
selftest_running = false; \
-EFAULT; \
})
static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
static const enum io_pgtable_fmt fmts[] = {
ARM_64_LPAE_S1,
ARM_64_LPAE_S2,
};
int i, j;
unsigned long iova;
size_t size;
struct io_pgtable_ops *ops;
selftest_running = true;
for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
cfg_cookie = cfg;
ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
if (!ops) {
pr_err("selftest: failed to allocate io pgtable ops\n");
return -ENOMEM;
}
/*
* Initial sanity checks.
* Empty page tables shouldn't provide any translations.
*/
if (ops->iova_to_phys(ops, 42))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, SZ_1G + 42))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, SZ_2G + 42))
return __FAIL(ops, i);
/*
* Distinct mappings of different granule sizes.
*/
iova = 0;
j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
while (j != BITS_PER_LONG) {
size = 1UL << j;
if (ops->map(ops, iova, iova, size, IOMMU_READ |
IOMMU_WRITE |
IOMMU_NOEXEC |
IOMMU_CACHE))
return __FAIL(ops, i);
/* Overlapping mappings */
if (!ops->map(ops, iova, iova + size, size,
IOMMU_READ | IOMMU_NOEXEC))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
return __FAIL(ops, i);
iova += SZ_1G;
j++;
j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
}
/* Partial unmap */
size = 1UL << __ffs(cfg->pgsize_bitmap);
if (ops->unmap(ops, SZ_1G + size, size) != size)
return __FAIL(ops, i);
/* Remap of partial unmap */
if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
return __FAIL(ops, i);
/* Full unmap */
iova = 0;
j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
while (j != BITS_PER_LONG) {
size = 1UL << j;
if (ops->unmap(ops, iova, size) != size)
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42))
return __FAIL(ops, i);
/* Remap full block */
if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
return __FAIL(ops, i);
if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
return __FAIL(ops, i);
iova += SZ_1G;
j++;
j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
}
free_io_pgtable_ops(ops);
}
selftest_running = false;
return 0;
}
static int __init arm_lpae_do_selftests(void)
{
static const unsigned long pgsize[] = {
SZ_4K | SZ_2M | SZ_1G,
SZ_16K | SZ_32M,
SZ_64K | SZ_512M,
};
static const unsigned int ias[] = {
32, 36, 40, 42, 44, 48,
};
int i, j, pass = 0, fail = 0;
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.oas = 48,
};
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
for (j = 0; j < ARRAY_SIZE(ias); ++j) {
cfg.pgsize_bitmap = pgsize[i];
cfg.ias = ias[j];
pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
pgsize[i], ias[j]);
if (arm_lpae_run_tests(&cfg))
fail++;
else
pass++;
}
}
pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif

View File

@ -0,0 +1,82 @@
/*
* Generic page table allocator for IOMMUs.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) 2014 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include "io-pgtable.h"
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
static const struct io_pgtable_init_fns *
io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
{
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
[ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
[ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
[ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
[ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
#endif
};
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
struct io_pgtable_cfg *cfg,
void *cookie)
{
struct io_pgtable *iop;
const struct io_pgtable_init_fns *fns;
if (fmt >= IO_PGTABLE_NUM_FMTS)
return NULL;
fns = io_pgtable_init_table[fmt];
if (!fns)
return NULL;
iop = fns->alloc(cfg, cookie);
if (!iop)
return NULL;
iop->fmt = fmt;
iop->cookie = cookie;
iop->cfg = *cfg;
return &iop->ops;
}
/*
* It is the IOMMU driver's responsibility to ensure that the page table
* is no longer accessible to the walker by this point.
*/
void free_io_pgtable_ops(struct io_pgtable_ops *ops)
{
struct io_pgtable *iop;
if (!ops)
return;
iop = container_of(ops, struct io_pgtable, ops);
iop->cfg.tlb->tlb_flush_all(iop->cookie);
io_pgtable_init_table[iop->fmt]->free(iop);
}
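As a rough sketch of how an IOMMU driver consumes this allocator (the my_* names and the domain structure are placeholders, and <linux/iommu.h> plus <linux/sizes.h> are assumed on top of the includes above; the ipmmu-vmsa conversion further down in this diff is a real user):

	struct my_domain {
		struct io_pgtable_ops	*iop;
		struct io_pgtable_cfg	cfg;
	};

	/* TLB/page-table maintenance callbacks the hardware driver must supply. */
	static void my_tlb_flush_all(void *cookie) { /* invalidate the whole context */ }
	static void my_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				     void *cookie) { /* queue an invalidation */ }
	static void my_tlb_sync(void *cookie) { /* wait for queued invalidations */ }
	static void my_flush_pgtable(void *ptr, size_t size, void *cookie)
	{ /* make page-table updates visible to the IOMMU, e.g. cache clean */ }

	static const struct iommu_gather_ops my_gather_ops = {
		.tlb_flush_all	= my_tlb_flush_all,
		.tlb_add_flush	= my_tlb_add_flush,
		.tlb_sync	= my_tlb_sync,
		.flush_pgtable	= my_flush_pgtable,
	};

	static int my_domain_init(struct my_domain *dom)
	{
		dom->cfg = (struct io_pgtable_cfg) {
			.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
			.ias		= 48,
			.oas		= 48,
			.tlb		= &my_gather_ops,
		};

		dom->iop = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &dom->cfg, dom);
		if (!dom->iop)
			return -EINVAL;

		/* dom->cfg now carries the TCR/MAIR/TTBR values to program. */
		return 0;
	}

	static int my_map_one_page(struct my_domain *dom, unsigned long iova,
				   phys_addr_t paddr)
	{
		return dom->iop->map(dom->iop, iova, paddr, SZ_4K,
				     IOMMU_READ | IOMMU_WRITE);
	}

Teardown mirrors the comment above free_io_pgtable_ops(): detach the masters and quiesce DMA first, then call free_io_pgtable_ops(dom->iop), which issues the final tlb_flush_all() on the driver's behalf.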

drivers/iommu/io-pgtable.h (new file, 143 lines)

@ -0,0 +1,143 @@
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
/*
* Public API for use by IOMMU drivers
*/
enum io_pgtable_fmt {
ARM_32_LPAE_S1,
ARM_32_LPAE_S2,
ARM_64_LPAE_S1,
ARM_64_LPAE_S2,
IO_PGTABLE_NUM_FMTS,
};
/**
* struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
*
* @tlb_flush_all: Synchronously invalidate the entire TLB context.
* @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect.
* @flush_pgtable: Ensure page table updates are visible to the IOMMU.
*
* Note that these can all be called in atomic context and must therefore
* not block.
*/
struct iommu_gather_ops {
void (*tlb_flush_all)(void *cookie);
void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
void *cookie);
void (*tlb_sync)(void *cookie);
void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
};
/**
* struct io_pgtable_cfg - Configuration data for a set of page tables.
*
* @quirks: A bitmap of hardware quirks that require some special
* action by the low-level page table allocator.
* @pgsize_bitmap: A bitmap of page sizes supported by this set of page
* tables.
* @ias: Input address (iova) size, in bits.
* @oas: Output address (paddr) size, in bits.
* @tlb: TLB management callbacks for this set of tables.
*/
struct io_pgtable_cfg {
#define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
int quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
unsigned int oas;
const struct iommu_gather_ops *tlb;
/* Low-level data specific to the table format */
union {
struct {
u64 ttbr[2];
u64 tcr;
u64 mair[2];
} arm_lpae_s1_cfg;
struct {
u64 vttbr;
u64 vtcr;
} arm_lpae_s2_cfg;
};
};
/**
* struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
*
* @map: Map a physically contiguous memory region.
* @unmap: Unmap a physically contiguous memory region.
* @iova_to_phys: Translate iova to physical address.
*
* These functions map directly onto the iommu_ops member functions with
* the same names.
*/
struct io_pgtable_ops {
int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
size_t size);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
};
/**
* alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU.
*
* @fmt: The page table format.
* @cfg: The page table configuration. This will be modified to represent
* the configuration actually provided by the allocator (e.g. the
* pgsize_bitmap may be restricted).
* @cookie: An opaque token provided by the IOMMU driver and passed back to
* the callback routines in cfg->tlb.
*/
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
struct io_pgtable_cfg *cfg,
void *cookie);
/**
* free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
* *must* ensure that the page table is no longer
* live, but the TLB can be dirty.
*
* @ops: The ops returned from alloc_io_pgtable_ops.
*/
void free_io_pgtable_ops(struct io_pgtable_ops *ops);
/*
* Internal structures for page table allocator implementations.
*/
/**
* struct io_pgtable - Internal structure describing a set of page tables.
*
* @fmt: The page table format.
* @cookie: An opaque token provided by the IOMMU driver and passed back to
* any callback routines.
* @cfg: A copy of the page table configuration.
* @ops: The page table operations in use for this set of page tables.
*/
struct io_pgtable {
enum io_pgtable_fmt fmt;
void *cookie;
struct io_pgtable_cfg cfg;
struct io_pgtable_ops ops;
};
/**
* struct io_pgtable_init_fns - Alloc/free a set of page tables for a
* particular format.
*
* @alloc: Allocate a set of page tables described by cfg.
* @free: Free the page tables associated with iop.
*/
struct io_pgtable_init_fns {
struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
void (*free)(struct io_pgtable *iop);
};
#endif /* __IO_PGTABLE_H */
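A sketch of how a further page-table format would plug in under this header (the MY_PGTABLE_FMT name and callbacks are hypothetical): add an enum value ahead of IO_PGTABLE_NUM_FMTS, export a matching init_fns object, and register it in io_pgtable_init_table[] in io-pgtable.c:

	/* in enum io_pgtable_fmt, before IO_PGTABLE_NUM_FMTS */
	MY_PGTABLE_FMT,

	/* in the format's implementation */
	struct io_pgtable_init_fns io_pgtable_my_fmt_init_fns = {
		.alloc	= my_fmt_alloc_pgtable,
		.free	= my_fmt_free_pgtable,
	};

	/* in io_pgtable_init_table[] */
	[MY_PGTABLE_FMT] = &io_pgtable_my_fmt_init_fns,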

View File

@ -1,6 +1,6 @@
/*
* Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@ -1084,7 +1084,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
if (ret)
iommu_unmap(domain, orig_iova, orig_size - size);
else
trace_map(iova, paddr, size);
trace_map(orig_iova, paddr, orig_size);
return ret;
}
@ -1094,6 +1094,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
size_t unmapped_page, unmapped = 0;
unsigned int min_pagesz;
unsigned long orig_iova = iova;
if (unlikely(domain->ops->unmap == NULL ||
domain->ops->pgsize_bitmap == 0UL))
@ -1133,7 +1134,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unmapped += unmapped_page;
}
trace_unmap(iova, 0, size);
trace_unmap(orig_iova, size, unmapped);
return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

View File

@ -18,13 +18,58 @@
*/
#include <linux/iova.h>
#include <linux/slab.h>
static struct kmem_cache *iommu_iova_cache;
int iommu_iova_cache_init(void)
{
int ret = 0;
iommu_iova_cache = kmem_cache_create("iommu_iova",
sizeof(struct iova),
0,
SLAB_HWCACHE_ALIGN,
NULL);
if (!iommu_iova_cache) {
pr_err("Couldn't create iova cache\n");
ret = -ENOMEM;
}
return ret;
}
void iommu_iova_cache_destroy(void)
{
kmem_cache_destroy(iommu_iova_cache);
}
struct iova *alloc_iova_mem(void)
{
return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
}
void free_iova_mem(struct iova *iova)
{
kmem_cache_free(iommu_iova_cache, iova);
}
void
init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn, unsigned long pfn_32bit)
{
/*
* IOVA granularity will normally be equal to the smallest
* supported IOMMU page size; both *must* be capable of
* representing individual CPU pages exactly.
*/
BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
iovad->cached32_node = NULL;
iovad->granule = granule;
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = pfn_32bit;
}
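/*
 * Call-site sketch (illustrative): with the extended signature a DMA-API
 * implementation now passes an explicit granule and start pfn instead of
 * relying on the old global IOVA_START_PFN, e.g. for 4K IOVA granularity
 * starting at pfn 1:
 *
 *	init_iova_domain(&iovad, SZ_4K, 1, dma_32bit_pfn);
 *
 * where dma_32bit_pfn is whatever 32-bit DMA limit the caller already
 * used with the two-argument version.
 */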
@ -127,7 +172,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
if (!curr) {
if (size_aligned)
pad_size = iova_get_pad_size(size, limit_pfn);
if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return -ENOMEM;
}
@ -202,8 +247,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
* @size: - size of page frames to allocate
* @limit_pfn: - max limit address
* @size_aligned: - set if size_aligned address range is required
* This function allocates an iova in the range limit_pfn to IOVA_START_PFN
* looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
* This function allocates an iova in the range iovad->start_pfn to limit_pfn,
* searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
* flag is set then the allocated address iova->pfn_lo will be naturally
* aligned on roundup_power_of_two(size).
*/

View File

@ -16,7 +16,7 @@
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_data/ipmmu-vmsa.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@ -24,12 +24,13 @@
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#include "io-pgtable.h"
struct ipmmu_vmsa_device {
struct device *dev;
void __iomem *base;
struct list_head list;
const struct ipmmu_vmsa_platform_data *pdata;
unsigned int num_utlbs;
struct dma_iommu_mapping *mapping;
@ -39,14 +40,17 @@ struct ipmmu_vmsa_domain {
struct ipmmu_vmsa_device *mmu;
struct iommu_domain *io_domain;
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
unsigned int context_id;
spinlock_t lock; /* Protects mappings */
pgd_t *pgd;
};
struct ipmmu_vmsa_archdata {
struct ipmmu_vmsa_device *mmu;
unsigned int utlb;
unsigned int *utlbs;
unsigned int num_utlbs;
};
static DEFINE_SPINLOCK(ipmmu_devices_lock);
@ -58,6 +62,8 @@ static LIST_HEAD(ipmmu_devices);
* Registers Definition
*/
#define IM_NS_ALIAS_OFFSET 0x800
#define IM_CTX_SIZE 0x40
#define IMCTR 0x0000
@ -170,52 +176,6 @@ static LIST_HEAD(ipmmu_devices);
#define IMUASID_ASID0_MASK (0xff << 0)
#define IMUASID_ASID0_SHIFT 0
/* -----------------------------------------------------------------------------
* Page Table Bits
*/
/*
* VMSA states in section B3.6.3 "Control of Secure or Non-secure memory access,
* Long-descriptor format" that the NStable bit being set in a table descriptor
* will result in the NStable and NS bits of all child entries being ignored and
* considered as being set. The IPMMU seems not to comply with this, as it
* generates a secure access page fault if any of the NStable and NS bits isn't
* set when running in non-secure mode.
*/
#ifndef PMD_NSTABLE
#define PMD_NSTABLE (_AT(pmdval_t, 1) << 63)
#endif
#define ARM_VMSA_PTE_XN (((pteval_t)3) << 53)
#define ARM_VMSA_PTE_CONT (((pteval_t)1) << 52)
#define ARM_VMSA_PTE_AF (((pteval_t)1) << 10)
#define ARM_VMSA_PTE_SH_NS (((pteval_t)0) << 8)
#define ARM_VMSA_PTE_SH_OS (((pteval_t)2) << 8)
#define ARM_VMSA_PTE_SH_IS (((pteval_t)3) << 8)
#define ARM_VMSA_PTE_SH_MASK (((pteval_t)3) << 8)
#define ARM_VMSA_PTE_NS (((pteval_t)1) << 5)
#define ARM_VMSA_PTE_PAGE (((pteval_t)3) << 0)
/* Stage-1 PTE */
#define ARM_VMSA_PTE_nG (((pteval_t)1) << 11)
#define ARM_VMSA_PTE_AP_UNPRIV (((pteval_t)1) << 6)
#define ARM_VMSA_PTE_AP_RDONLY (((pteval_t)2) << 6)
#define ARM_VMSA_PTE_AP_MASK (((pteval_t)3) << 6)
#define ARM_VMSA_PTE_ATTRINDX_MASK (((pteval_t)3) << 2)
#define ARM_VMSA_PTE_ATTRINDX_SHIFT 2
#define ARM_VMSA_PTE_ATTRS_MASK \
(ARM_VMSA_PTE_XN | ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_nG | \
ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_MASK | ARM_VMSA_PTE_AP_MASK | \
ARM_VMSA_PTE_NS | ARM_VMSA_PTE_ATTRINDX_MASK)
#define ARM_VMSA_PTE_CONT_ENTRIES 16
#define ARM_VMSA_PTE_CONT_SIZE (PAGE_SIZE * ARM_VMSA_PTE_CONT_ENTRIES)
#define IPMMU_PTRS_PER_PTE 512
#define IPMMU_PTRS_PER_PMD 512
#define IPMMU_PTRS_PER_PGD 4
/* -----------------------------------------------------------------------------
* Read/Write Access
*/
@ -305,18 +265,39 @@ static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
ipmmu_write(mmu, IMUCTR(utlb), 0);
}
static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
size_t size)
static void ipmmu_tlb_flush_all(void *cookie)
{
unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
struct ipmmu_vmsa_domain *domain = cookie;
ipmmu_tlb_invalidate(domain);
}
static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
void *cookie)
{
/* The hardware doesn't support selective TLB flush. */
}
static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
{
unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
struct ipmmu_vmsa_domain *domain = cookie;
/*
* TODO: Add support for coherent walk through CCI with DVM and remove
* cache handling.
*/
dma_map_page(mmu->dev, virt_to_page(addr), offset, size, DMA_TO_DEVICE);
dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
DMA_TO_DEVICE);
}
static struct iommu_gather_ops ipmmu_gather_ops = {
.tlb_flush_all = ipmmu_tlb_flush_all,
.tlb_add_flush = ipmmu_tlb_add_flush,
.tlb_sync = ipmmu_tlb_flush_all,
.flush_pgtable = ipmmu_flush_pgtable,
};
/* -----------------------------------------------------------------------------
* Domain/Context Management
*/
@ -324,7 +305,28 @@ static void ipmmu_flush_pgtable(struct ipmmu_vmsa_device *mmu, void *addr,
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
phys_addr_t ttbr;
u32 reg;
/*
* Allocate the page table operations.
*
* VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
* access, Long-descriptor format" that the NStable bit being set in a
* table descriptor will result in the NStable and NS bits of all child
* entries being ignored and considered as being set. The IPMMU seems
* not to comply with this, as it generates a secure access page fault
* if any of the NStable and NS bits isn't set when running in
* non-secure mode.
*/
domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
domain->cfg.ias = 32;
domain->cfg.oas = 40;
domain->cfg.tlb = &ipmmu_gather_ops;
domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
domain);
if (!domain->iop)
return -EINVAL;
/*
* TODO: When adding support for multiple contexts, find an unused
@ -333,9 +335,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
domain->context_id = 0;
/* TTBR0 */
ipmmu_flush_pgtable(domain->mmu, domain->pgd,
IPMMU_PTRS_PER_PGD * sizeof(*domain->pgd));
ttbr = __pa(domain->pgd);
ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);
@ -348,15 +348,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);
/*
* MAIR0
* We need three attributes only, non-cacheable, write-back read/write
* allocate and device memory.
*/
reg = (IMMAIR_ATTR_NC << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_NC))
| (IMMAIR_ATTR_WBRWA << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_WBRWA))
| (IMMAIR_ATTR_DEVICE << IMMAIR_ATTR_SHIFT(IMMAIR_ATTR_IDX_DEV));
ipmmu_ctx_write(domain, IMMAIR0, reg);
/* MAIR0 */
ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);
/* IMBUSCR */
ipmmu_ctx_write(domain, IMBUSCR,
@ -460,396 +453,6 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
return ipmmu_domain_irq(domain);
}
/* -----------------------------------------------------------------------------
* Page Table Management
*/
#define pud_pgtable(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
static void ipmmu_free_ptes(pmd_t *pmd)
{
pgtable_t table = pmd_pgtable(*pmd);
__free_page(table);
}
static void ipmmu_free_pmds(pud_t *pud)
{
pmd_t *pmd = pmd_offset(pud, 0);
pgtable_t table;
unsigned int i;
for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
if (!pmd_table(*pmd))
continue;
ipmmu_free_ptes(pmd);
pmd++;
}
table = pud_pgtable(*pud);
__free_page(table);
}
static void ipmmu_free_pgtables(struct ipmmu_vmsa_domain *domain)
{
pgd_t *pgd, *pgd_base = domain->pgd;
unsigned int i;
/*
* Recursively free the page tables for this domain. We don't care about
* speculative TLB filling, because the TLB will be nuked next time this
* context bank is re-allocated and no devices currently map to these
* tables.
*/
pgd = pgd_base;
for (i = 0; i < IPMMU_PTRS_PER_PGD; ++i) {
if (pgd_none(*pgd))
continue;
ipmmu_free_pmds((pud_t *)pgd);
pgd++;
}
kfree(pgd_base);
}
/*
* We can't use the (pgd|pud|pmd|pte)_populate or the set_(pgd|pud|pmd|pte)
* functions as they would flush the CPU TLB.
*/
static pte_t *ipmmu_alloc_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
unsigned long iova)
{
pte_t *pte;
if (!pmd_none(*pmd))
return pte_offset_kernel(pmd, iova);
pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
if (!pte)
return NULL;
ipmmu_flush_pgtable(mmu, pte, PAGE_SIZE);
*pmd = __pmd(__pa(pte) | PMD_NSTABLE | PMD_TYPE_TABLE);
ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
return pte + pte_index(iova);
}
static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
unsigned long iova)
{
pud_t *pud = (pud_t *)pgd;
pmd_t *pmd;
if (!pud_none(*pud))
return pmd_offset(pud, iova);
pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
if (!pmd)
return NULL;
ipmmu_flush_pgtable(mmu, pmd, PAGE_SIZE);
*pud = __pud(__pa(pmd) | PMD_NSTABLE | PMD_TYPE_TABLE);
ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
return pmd + pmd_index(iova);
}
static u64 ipmmu_page_prot(unsigned int prot, u64 type)
{
u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
| ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
| ARM_VMSA_PTE_NS | type;
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pgprot |= ARM_VMSA_PTE_AP_RDONLY;
if (prot & IOMMU_CACHE)
pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
if (prot & IOMMU_NOEXEC)
pgprot |= ARM_VMSA_PTE_XN;
else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
/* If no access create a faulting entry to avoid TLB fills. */
pgprot &= ~ARM_VMSA_PTE_PAGE;
return pgprot;
}
static int ipmmu_alloc_init_pte(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
unsigned long iova, unsigned long pfn,
size_t size, int prot)
{
pteval_t pteval = ipmmu_page_prot(prot, ARM_VMSA_PTE_PAGE);
unsigned int num_ptes = 1;
pte_t *pte, *start;
unsigned int i;
pte = ipmmu_alloc_pte(mmu, pmd, iova);
if (!pte)
return -ENOMEM;
start = pte;
/*
* Install the page table entries. We can be called both for a single
* page or for a block of 16 physically contiguous pages. In the latter
* case set the PTE contiguous hint.
*/
if (size == SZ_64K) {
pteval |= ARM_VMSA_PTE_CONT;
num_ptes = ARM_VMSA_PTE_CONT_ENTRIES;
}
for (i = num_ptes; i; --i)
*pte++ = pfn_pte(pfn++, __pgprot(pteval));
ipmmu_flush_pgtable(mmu, start, sizeof(*pte) * num_ptes);
return 0;
}
static int ipmmu_alloc_init_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd,
unsigned long iova, unsigned long pfn,
int prot)
{
pmdval_t pmdval = ipmmu_page_prot(prot, PMD_TYPE_SECT);
*pmd = pfn_pmd(pfn, __pgprot(pmdval));
ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
return 0;
}
static int ipmmu_create_mapping(struct ipmmu_vmsa_domain *domain,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
struct ipmmu_vmsa_device *mmu = domain->mmu;
pgd_t *pgd = domain->pgd;
unsigned long flags;
unsigned long pfn;
pmd_t *pmd;
int ret;
if (!pgd)
return -EINVAL;
if (size & ~PAGE_MASK)
return -EINVAL;
if (paddr & ~((1ULL << 40) - 1))
return -ERANGE;
pfn = __phys_to_pfn(paddr);
pgd += pgd_index(iova);
/* Update the page tables. */
spin_lock_irqsave(&domain->lock, flags);
pmd = ipmmu_alloc_pmd(mmu, pgd, iova);
if (!pmd) {
ret = -ENOMEM;
goto done;
}
switch (size) {
case SZ_2M:
ret = ipmmu_alloc_init_pmd(mmu, pmd, iova, pfn, prot);
break;
case SZ_64K:
case SZ_4K:
ret = ipmmu_alloc_init_pte(mmu, pmd, iova, pfn, size, prot);
break;
default:
ret = -EINVAL;
break;
}
done:
spin_unlock_irqrestore(&domain->lock, flags);
if (!ret)
ipmmu_tlb_invalidate(domain);
return ret;
}
static void ipmmu_clear_pud(struct ipmmu_vmsa_device *mmu, pud_t *pud)
{
/* Free the page table. */
pgtable_t table = pud_pgtable(*pud);
__free_page(table);
/* Clear the PUD. */
*pud = __pud(0);
ipmmu_flush_pgtable(mmu, pud, sizeof(*pud));
}
static void ipmmu_clear_pmd(struct ipmmu_vmsa_device *mmu, pud_t *pud,
pmd_t *pmd)
{
unsigned int i;
/* Free the page table. */
if (pmd_table(*pmd)) {
pgtable_t table = pmd_pgtable(*pmd);
__free_page(table);
}
/* Clear the PMD. */
*pmd = __pmd(0);
ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
/* Check whether the PUD is still needed. */
pmd = pmd_offset(pud, 0);
for (i = 0; i < IPMMU_PTRS_PER_PMD; ++i) {
if (!pmd_none(pmd[i]))
return;
}
/* Clear the parent PUD. */
ipmmu_clear_pud(mmu, pud);
}
static void ipmmu_clear_pte(struct ipmmu_vmsa_device *mmu, pud_t *pud,
pmd_t *pmd, pte_t *pte, unsigned int num_ptes)
{
unsigned int i;
/* Clear the PTE. */
for (i = num_ptes; i; --i)
pte[i-1] = __pte(0);
ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * num_ptes);
/* Check whether the PMD is still needed. */
pte = pte_offset_kernel(pmd, 0);
for (i = 0; i < IPMMU_PTRS_PER_PTE; ++i) {
if (!pte_none(pte[i]))
return;
}
/* Clear the parent PMD. */
ipmmu_clear_pmd(mmu, pud, pmd);
}
static int ipmmu_split_pmd(struct ipmmu_vmsa_device *mmu, pmd_t *pmd)
{
pte_t *pte, *start;
pteval_t pteval;
unsigned long pfn;
unsigned int i;
pte = (pte_t *)get_zeroed_page(GFP_ATOMIC);
if (!pte)
return -ENOMEM;
/* Copy the PMD attributes. */
pteval = (pmd_val(*pmd) & ARM_VMSA_PTE_ATTRS_MASK)
| ARM_VMSA_PTE_CONT | ARM_VMSA_PTE_PAGE;
pfn = pmd_pfn(*pmd);
start = pte;
for (i = IPMMU_PTRS_PER_PTE; i; --i)
*pte++ = pfn_pte(pfn++, __pgprot(pteval));
ipmmu_flush_pgtable(mmu, start, PAGE_SIZE);
*pmd = __pmd(__pa(start) | PMD_NSTABLE | PMD_TYPE_TABLE);
ipmmu_flush_pgtable(mmu, pmd, sizeof(*pmd));
return 0;
}
static void ipmmu_split_pte(struct ipmmu_vmsa_device *mmu, pte_t *pte)
{
unsigned int i;
for (i = ARM_VMSA_PTE_CONT_ENTRIES; i; --i)
pte[i-1] = __pte(pte_val(*pte) & ~ARM_VMSA_PTE_CONT);
ipmmu_flush_pgtable(mmu, pte, sizeof(*pte) * ARM_VMSA_PTE_CONT_ENTRIES);
}
static int ipmmu_clear_mapping(struct ipmmu_vmsa_domain *domain,
unsigned long iova, size_t size)
{
struct ipmmu_vmsa_device *mmu = domain->mmu;
unsigned long flags;
pgd_t *pgd = domain->pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
int ret = 0;
if (!pgd)
return -EINVAL;
if (size & ~PAGE_MASK)
return -EINVAL;
pgd += pgd_index(iova);
pud = (pud_t *)pgd;
spin_lock_irqsave(&domain->lock, flags);
/* If there's no PUD or PMD we're done. */
if (pud_none(*pud))
goto done;
pmd = pmd_offset(pud, iova);
if (pmd_none(*pmd))
goto done;
/*
* When freeing a 2MB block just clear the PMD. In the unlikely case the
* block is mapped as individual pages this will free the corresponding
* PTE page table.
*/
if (size == SZ_2M) {
ipmmu_clear_pmd(mmu, pud, pmd);
goto done;
}
/*
* If the PMD has been mapped as a section remap it as pages to allow
* freeing individual pages.
*/
if (pmd_sect(*pmd))
ipmmu_split_pmd(mmu, pmd);
pte = pte_offset_kernel(pmd, iova);
/*
* When freeing a 64kB block just clear the PTE entries. We don't have
* to care about the contiguous hint of the surrounding entries.
*/
if (size == SZ_64K) {
ipmmu_clear_pte(mmu, pud, pmd, pte, ARM_VMSA_PTE_CONT_ENTRIES);
goto done;
}
/*
* If the PTE has been mapped with the contiguous hint set remap it and
* its surrounding PTEs to allow unmapping a single page.
*/
if (pte_val(*pte) & ARM_VMSA_PTE_CONT)
ipmmu_split_pte(mmu, pte);
/* Clear the PTE. */
ipmmu_clear_pte(mmu, pud, pmd, pte, 1);
done:
spin_unlock_irqrestore(&domain->lock, flags);
if (ret)
ipmmu_tlb_invalidate(domain);
return 0;
}
/* -----------------------------------------------------------------------------
* IOMMU Operations
*/
@ -864,12 +467,6 @@ static int ipmmu_domain_init(struct iommu_domain *io_domain)
spin_lock_init(&domain->lock);
domain->pgd = kzalloc(IPMMU_PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
if (!domain->pgd) {
kfree(domain);
return -ENOMEM;
}
io_domain->priv = domain;
domain->io_domain = io_domain;
@ -885,7 +482,7 @@ static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
* been detached.
*/
ipmmu_domain_destroy_context(domain);
ipmmu_free_pgtables(domain);
free_io_pgtable_ops(domain->iop);
kfree(domain);
}
@ -896,6 +493,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct ipmmu_vmsa_device *mmu = archdata->mmu;
struct ipmmu_vmsa_domain *domain = io_domain->priv;
unsigned long flags;
unsigned int i;
int ret = 0;
if (!mmu) {
@ -924,7 +522,8 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
if (ret < 0)
return ret;
ipmmu_utlb_enable(domain, archdata->utlb);
for (i = 0; i < archdata->num_utlbs; ++i)
ipmmu_utlb_enable(domain, archdata->utlbs[i]);
return 0;
}
@ -934,8 +533,10 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
struct ipmmu_vmsa_domain *domain = io_domain->priv;
unsigned int i;
ipmmu_utlb_disable(domain, archdata->utlb);
for (i = 0; i < archdata->num_utlbs; ++i)
ipmmu_utlb_disable(domain, archdata->utlbs[i]);
/*
* TODO: Optimize by disabling the context when no device is attached.
@ -950,76 +551,61 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
if (!domain)
return -ENODEV;
return ipmmu_create_mapping(domain, iova, paddr, size, prot);
return domain->iop->map(domain->iop, iova, paddr, size, prot);
}
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
size_t size)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
int ret;
ret = ipmmu_clear_mapping(domain, iova, size);
return ret ? 0 : size;
return domain->iop->unmap(domain->iop, iova, size);
}
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
dma_addr_t iova)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
pgd_t pgd;
pud_t pud;
pmd_t pmd;
pte_t pte;
/* TODO: Is locking needed ? */
if (!domain->pgd)
return 0;
pgd = *(domain->pgd + pgd_index(iova));
if (pgd_none(pgd))
return 0;
pud = *pud_offset(&pgd, iova);
if (pud_none(pud))
return 0;
pmd = *pmd_offset(&pud, iova);
if (pmd_none(pmd))
return 0;
if (pmd_sect(pmd))
return __pfn_to_phys(pmd_pfn(pmd)) | (iova & ~PMD_MASK);
pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
if (pte_none(pte))
return 0;
return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
return domain->iop->iova_to_phys(domain->iop, iova);
}
static int ipmmu_find_utlb(struct ipmmu_vmsa_device *mmu, struct device *dev)
static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
unsigned int *utlbs, unsigned int num_utlbs)
{
const struct ipmmu_vmsa_master *master = mmu->pdata->masters;
const char *devname = dev_name(dev);
unsigned int i;
for (i = 0; i < mmu->pdata->num_masters; ++i, ++master) {
if (strcmp(master->name, devname) == 0)
return master->utlb;
for (i = 0; i < num_utlbs; ++i) {
struct of_phandle_args args;
int ret;
ret = of_parse_phandle_with_args(dev->of_node, "iommus",
"#iommu-cells", i, &args);
if (ret < 0)
return ret;
of_node_put(args.np);
if (args.np != mmu->dev->of_node || args.args_count != 1)
return -EINVAL;
utlbs[i] = args.args[0];
}
return -1;
return 0;
}
static int ipmmu_add_device(struct device *dev)
{
struct ipmmu_vmsa_archdata *archdata;
struct ipmmu_vmsa_device *mmu;
struct iommu_group *group;
int utlb = -1;
int ret;
struct iommu_group *group = NULL;
unsigned int *utlbs;
unsigned int i;
int num_utlbs;
int ret = -ENODEV;
if (dev->archdata.iommu) {
dev_warn(dev, "IOMMU driver already assigned to device %s\n",
@ -1028,11 +614,21 @@ static int ipmmu_add_device(struct device *dev)
}
/* Find the master corresponding to the device. */
num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",
"#iommu-cells");
if (num_utlbs < 0)
return -ENODEV;
utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);
if (!utlbs)
return -ENOMEM;
spin_lock(&ipmmu_devices_lock);
list_for_each_entry(mmu, &ipmmu_devices, list) {
utlb = ipmmu_find_utlb(mmu, dev);
if (utlb >= 0) {
ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);
if (!ret) {
/*
* TODO Take a reference to the MMU to protect
* against device removal.
@ -1043,17 +639,22 @@ static int ipmmu_add_device(struct device *dev)
spin_unlock(&ipmmu_devices_lock);
if (utlb < 0)
if (ret < 0)
return -ENODEV;
if (utlb >= mmu->num_utlbs)
return -EINVAL;
for (i = 0; i < num_utlbs; ++i) {
if (utlbs[i] >= mmu->num_utlbs) {
ret = -EINVAL;
goto error;
}
}
/* Create a device group and add the device to it. */
group = iommu_group_alloc();
if (IS_ERR(group)) {
dev_err(dev, "Failed to allocate IOMMU group\n");
return PTR_ERR(group);
ret = PTR_ERR(group);
goto error;
}
ret = iommu_group_add_device(group, dev);
@ -1061,7 +662,8 @@ static int ipmmu_add_device(struct device *dev)
if (ret < 0) {
dev_err(dev, "Failed to add device to IPMMU group\n");
return ret;
group = NULL;
goto error;
}
archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
@ -1071,7 +673,8 @@ static int ipmmu_add_device(struct device *dev)
}
archdata->mmu = mmu;
archdata->utlb = utlb;
archdata->utlbs = utlbs;
archdata->num_utlbs = num_utlbs;
dev->archdata.iommu = archdata;
/*
@ -1090,7 +693,8 @@ static int ipmmu_add_device(struct device *dev)
SZ_1G, SZ_2G);
if (IS_ERR(mapping)) {
dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
return PTR_ERR(mapping);
ret = PTR_ERR(mapping);
goto error;
}
mmu->mapping = mapping;
@ -1106,17 +710,29 @@ static int ipmmu_add_device(struct device *dev)
return 0;
error:
arm_iommu_release_mapping(mmu->mapping);
kfree(dev->archdata.iommu);
kfree(utlbs);
dev->archdata.iommu = NULL;
iommu_group_remove_device(dev);
if (!IS_ERR_OR_NULL(group))
iommu_group_remove_device(dev);
return ret;
}
static void ipmmu_remove_device(struct device *dev)
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
arm_iommu_detach_device(dev);
iommu_group_remove_device(dev);
kfree(dev->archdata.iommu);
kfree(archdata->utlbs);
kfree(archdata);
dev->archdata.iommu = NULL;
}
@ -1131,7 +747,7 @@ static const struct iommu_ops ipmmu_ops = {
.iova_to_phys = ipmmu_iova_to_phys,
.add_device = ipmmu_add_device,
.remove_device = ipmmu_remove_device,
.pgsize_bitmap = SZ_2M | SZ_64K | SZ_4K,
.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
};
/* -----------------------------------------------------------------------------
@ -1154,7 +770,7 @@ static int ipmmu_probe(struct platform_device *pdev)
int irq;
int ret;
if (!pdev->dev.platform_data) {
if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) {
dev_err(&pdev->dev, "missing platform data\n");
return -EINVAL;
}
@ -1166,7 +782,6 @@ static int ipmmu_probe(struct platform_device *pdev)
}
mmu->dev = &pdev->dev;
mmu->pdata = pdev->dev.platform_data;
mmu->num_utlbs = 32;
/* Map I/O memory and request IRQ. */
@ -1175,6 +790,20 @@ static int ipmmu_probe(struct platform_device *pdev)
if (IS_ERR(mmu->base))
return PTR_ERR(mmu->base);
/*
* The IPMMU has two register banks, for secure and non-secure modes.
* The bank mapped at the beginning of the IPMMU address space
* corresponds to the running mode of the CPU. When running in secure
* mode the non-secure register bank is also available at an offset.
*
* Secure mode operation isn't clearly documented and is thus currently
* not implemented in the driver. Furthermore, preliminary tests of
* non-secure operation with the main register bank were not successful.
* Offset the registers base unconditionally to point to the non-secure
* alias space for now.
*/
mmu->base += IM_NS_ALIAS_OFFSET;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ found\n");
@ -1220,9 +849,14 @@ static int ipmmu_remove(struct platform_device *pdev)
return 0;
}
static const struct of_device_id ipmmu_of_ids[] = {
{ .compatible = "renesas,ipmmu-vmsa", },
};
static struct platform_driver ipmmu_driver = {
.driver = {
.name = "ipmmu-vmsa",
.of_match_table = of_match_ptr(ipmmu_of_ids),
},
.probe = ipmmu_probe,
.remove = ipmmu_remove,

View File

@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 Advanced Micro Devices, Inc.
* Author: Joerg Roedel <joerg.roedel@amd.com>
* Author: Joerg Roedel <jroedel@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published

View File

@ -1126,7 +1126,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
return -EINVAL;
}
dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%x\n", da, pa, bytes);
dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);
iotlb_init_entry(&e, da, pa, omap_pgsz);

include/linux/iopoll.h (new file, 144 lines)

@ -0,0 +1,144 @@
/*
* Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_IOPOLL_H
#define _LINUX_IOPOLL_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
/**
* readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
* @op: accessor function (takes @addr as its only argument)
* @addr: Address to poll
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
* @sleep_us: Maximum time to sleep between reads in us (0
* tight-loops). Should be less than ~20ms since usleep_range
* is used (see Documentation/timers/timers-howto.txt).
* @timeout_us: Timeout in us, 0 means never timeout
*
* Returns 0 on success and -ETIMEDOUT upon a timeout. In either
* case, the last read value at @addr is stored in @val. Must not
* be called from atomic context if sleep_us or timeout_us are used.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
*/
#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
({ \
ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
might_sleep_if(sleep_us); \
for (;;) { \
(val) = op(addr); \
if (cond) \
break; \
if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
(val) = op(addr); \
break; \
} \
if (sleep_us) \
usleep_range((sleep_us >> 2) + 1, sleep_us); \
} \
(cond) ? 0 : -ETIMEDOUT; \
})
/**
* readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs
* @op: accessor function (takes @addr as its only argument)
* @addr: Address to poll
* @val: Variable to read the value into
* @cond: Break condition (usually involving @val)
* @delay_us: Time to udelay between reads in us (0 tight-loops). Should
* be less than ~10us since udelay is used (see
* Documentation/timers/timers-howto.txt).
* @timeout_us: Timeout in us, 0 means never timeout
*
* Returns 0 on success and -ETIMEDOUT upon a timeout. In either
* case, the last read value at @addr is stored in @val.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
*/
#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \
({ \
ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \
for (;;) { \
(val) = op(addr); \
if (cond) \
break; \
if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \
(val) = op(addr); \
break; \
} \
if (delay_us) \
udelay(delay_us); \
} \
(cond) ? 0 : -ETIMEDOUT; \
})
#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us)
#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us)
#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us)
#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us)
#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us)
#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us)
#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us)
#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us)
#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us)
#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us)
#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us)
#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us)
#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us)
#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us)
#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us)
#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \
readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us)
#endif /* _LINUX_IOPOLL_H */
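For reference, here is a minimal sketch of how a driver might use the new polling helpers. The foo_* names, the register offset and the busy bit below are made up for illustration; only the readl_poll_timeout()/readl_poll_timeout_atomic() macros come from the header above.

#include <linux/io.h>
#include <linux/iopoll.h>

#define FOO_STATUS_REG	0x04	/* hypothetical register offset */
#define FOO_STATUS_BUSY	0x01	/* hypothetical busy bit */

/* Sleeps between reads, so it must not be called from atomic context. */
static int foo_wait_idle(void __iomem *base)
{
	u32 val;

	return readl_poll_timeout(base + FOO_STATUS_REG, val,
				  !(val & FOO_STATUS_BUSY),
				  10, 1000);	/* sleep 10us, give up after 1ms */
}

/* Busy-waits with udelay(), so it is safe under a spinlock. */
static int foo_wait_idle_atomic(void __iomem *base)
{
	u32 val;

	return readl_poll_timeout_atomic(base + FOO_STATUS_REG, val,
					 !(val & FOO_STATUS_BUSY),
					 2, 100);	/* delay 2us, timeout 100us */
}

Both helpers return 0 on success and -ETIMEDOUT on timeout. Note that the macros re-read the register once more after the timeout check, so a condition that becomes true just as the timeout expires is still reported as success.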

View File

@ -16,9 +16,6 @@
#include <linux/rbtree.h>
#include <linux/dma-mapping.h>
-/* IO virtual address start page frame number */
-#define IOVA_START_PFN (1)
/* iova structure */
struct iova {
struct rb_node node;
@ -31,6 +28,8 @@ struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
struct rb_root rbroot; /* iova domain rbtree root */
struct rb_node *cached32_node; /* Save last alloced node */
unsigned long granule; /* pfn granularity for this domain */
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
};
@ -39,6 +38,39 @@ static inline unsigned long iova_size(struct iova *iova)
return iova->pfn_hi - iova->pfn_lo + 1;
}
static inline unsigned long iova_shift(struct iova_domain *iovad)
{
return __ffs(iovad->granule);
}
static inline unsigned long iova_mask(struct iova_domain *iovad)
{
return iovad->granule - 1;
}
static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
{
return iova & iova_mask(iovad);
}
static inline size_t iova_align(struct iova_domain *iovad, size_t size)
{
return ALIGN(size, iovad->granule);
}
static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
{
return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
}
static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
{
return iova >> iova_shift(iovad);
}
int iommu_iova_cache_init(void);
void iommu_iova_cache_destroy(void);
struct iova *alloc_iova_mem(void);
void free_iova_mem(struct iova *iova);
void free_iova(struct iova_domain *iovad, unsigned long pfn);
@ -49,7 +81,8 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
-void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
+void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+unsigned long start_pfn, unsigned long pfn_32bit);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
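The reworked allocator is meant to be usable outside the Intel driver: a domain now carries its own granule and start pfn, and the new inline helpers convert between byte addresses and granule-sized pfns. Below is a rough sketch of how a DMA-API backend might drive this interface; the foo_* names, the 4KiB granule and the 32-bit limit are assumptions for illustration, not taken from any in-tree user.

#include <linux/dma-mapping.h>
#include <linux/iova.h>
#include <linux/log2.h>
#include <linux/sizes.h>

/* Assumed setup: 4KiB granule, allocations starting at pfn 1,
 * dma_32bit_pfn expressed in granule units. */
static void foo_domain_init(struct iova_domain *iovad)
{
	init_iova_domain(iovad, SZ_4K, 1, DMA_BIT_MASK(32) >> ilog2(SZ_4K));
}

static dma_addr_t foo_alloc_iova(struct iova_domain *iovad, size_t size)
{
	/* alloc_iova() works in granule-sized pages, so round the byte
	 * count up to the granule first. */
	unsigned long pages = iova_align(iovad, size) >> iova_shift(iovad);
	struct iova *iova;

	iova = alloc_iova(iovad, pages, DMA_BIT_MASK(32) >> iova_shift(iovad),
			  true);
	if (!iova)
		return 0;

	/* Convert the allocated pfn range back to a bus address. */
	return iova_dma_addr(iovad, iova);
}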

View File

@ -1,24 +0,0 @@
/*
* IPMMU VMSA Platform Data
*
* Copyright (C) 2014 Renesas Electronics Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*/
#ifndef __IPMMU_VMSA_H__
#define __IPMMU_VMSA_H__
struct ipmmu_vmsa_master {
const char *name;
unsigned int utlb;
};
struct ipmmu_vmsa_platform_data {
const struct ipmmu_vmsa_master *masters;
unsigned int num_masters;
};
#endif /* __IPMMU_VMSA_H__ */

View File

@ -83,7 +83,7 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
TP_ARGS(dev)
);
-DECLARE_EVENT_CLASS(iommu_map_unmap,
+TRACE_EVENT(map,
TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
@ -92,7 +92,7 @@ DECLARE_EVENT_CLASS(iommu_map_unmap,
TP_STRUCT__entry(
__field(u64, iova)
__field(u64, paddr)
-__field(int, size)
+__field(size_t, size)
),
TP_fast_assign(
@ -101,26 +101,31 @@ DECLARE_EVENT_CLASS(iommu_map_unmap,
__entry->size = size;
),
-TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x",
+TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
__entry->iova, __entry->paddr, __entry->size
)
);
-DEFINE_EVENT(iommu_map_unmap, map,
-TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
-TP_ARGS(iova, paddr, size)
-);
-DEFINE_EVENT_PRINT(iommu_map_unmap, unmap,
-TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
-TP_ARGS(iova, paddr, size),
-TP_printk("IOMMU: iova=0x%016llx size=0x%x",
-__entry->iova, __entry->size
+TRACE_EVENT(unmap,
+TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
+TP_ARGS(iova, size, unmapped_size),
+TP_STRUCT__entry(
+__field(u64, iova)
+__field(size_t, size)
+__field(size_t, unmapped_size)
+),
+TP_fast_assign(
+__entry->iova = iova;
+__entry->size = size;
+__entry->unmapped_size = unmapped_size;
+),
+TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
+__entry->iova, __entry->size, __entry->unmapped_size
)
);
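With map and unmap now being separate events, the unmap event carries both the size that was requested and the size that was actually unmapped. The sketch below shows the shape of the call sites this enables; the foo_* wrappers are hypothetical, only iommu_map(), iommu_unmap() and the two tracepoints are real symbols, and the IOMMU core itself already emits these events, so real drivers would not duplicate them.

#include <linux/iommu.h>
#include <trace/events/iommu.h>

static size_t foo_unmap_and_trace(struct iommu_domain *domain,
				  unsigned long iova, size_t size)
{
	size_t unmapped = iommu_unmap(domain, iova, size);

	/* The event records both the requested size and the size that was
	 * actually unmapped; the two differ when the range was only
	 * partially mapped. */
	trace_unmap(iova, size, unmapped);

	return unmapped;
}

static int foo_map_and_trace(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	int ret = iommu_map(domain, iova, paddr, size, prot);

	/* map now reports the caller's original iova, paddr and size rather
	 * than values advanced by the internal mapping loop. */
	if (!ret)
		trace_map(iova, paddr, size);

	return ret;
}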