Merge branches 'iommu/fixes', 'x86/vt-d', 'x86/amd', 'arm/smmu', 'arm/tegra' and 'core' into next

Conflicts:
	drivers/iommu/amd_iommu.c
	drivers/iommu/tegra-gart.c
	drivers/iommu/tegra-smmu.c
Joerg Roedel 2015-04-02 13:33:19 +02:00
146 changed files with 1937 additions and 1137 deletions


@ -1186,7 +1186,7 @@ M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-mvebu/
F: drivers/rtc/armada38x-rtc
F: drivers/rtc/rtc-armada38x.c
ARM/Marvell Berlin SoC support
M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@ -1362,6 +1362,7 @@ F: drivers/i2c/busses/i2c-rk3x.c
F: drivers/*/*rockchip*
F: drivers/*/*/*rockchip*
F: sound/soc/rockchip/
N: rockchip
ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene@kernel.org>
@ -1675,8 +1676,8 @@ F: drivers/misc/eeprom/at24.c
F: include/linux/platform_data/at24.h
ATA OVER ETHERNET (AOE) DRIVER
M: "Ed L. Cashin" <ecashin@coraid.com>
W: http://support.coraid.com/support/linux
M: "Ed L. Cashin" <ed.cashin@acm.org>
W: http://www.openaoe.org/
S: Supported
F: Documentation/aoe/
F: drivers/block/aoe/
@ -3252,6 +3253,13 @@ S: Maintained
F: Documentation/hwmon/dme1737
F: drivers/hwmon/dme1737.c
DMI/SMBIOS SUPPORT
M: Jean Delvare <jdelvare@suse.de>
S: Maintained
F: drivers/firmware/dmi-id.c
F: drivers/firmware/dmi_scan.c
F: include/linux/dmi.h
DOCKING STATION DRIVER
M: Shaohua Li <shaohua.li@intel.com>
L: linux-acpi@vger.kernel.org


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*


@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sigset_t *set)
{
int err;
err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
sizeof(sf->uc.uc_mcontext.regs.scratch));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
if (!err)
set_current_blocked(&set);
err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
return err;
@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
/* Don't restart from sigreturn */
syscall_wont_restart(regs);
/*
* Ensure that sigreturn always returns to user mode (in case the
* regs saved on user stack got fudged between save and sigreturn)
* Otherwise it is easy to panic the kernel with a custom
* signal handler and/or restorer which clobberes the status32/ret
* to return to a bogus location in kernel mode.
*/
regs->status32 |= STATUS_U_MASK;
return regs->r0;
badframe:
@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
/*
* handler returns using sigreturn stub provided already by userpsace
* If not, nuke the process right away
*/
BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
return 1;
regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;
/* User Stack for signal handler will be above the frame just carved */
@ -296,12 +308,12 @@ static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int ret;
int failed;
/* Set up the stack frame */
ret = setup_rt_frame(ksig, oldset, regs);
failed = setup_rt_frame(ksig, oldset, regs);
signal_setup_done(ret, ksig, 0);
signal_setup_done(failed, ksig, 0);
}
void do_signal(struct pt_regs *regs)


@ -619,6 +619,7 @@ config ARCH_PXA
select GENERIC_CLOCKEVENTS
select GPIO_PXA
select HAVE_IDE
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
select PLAT_PXA
select SPARSE_IRQ


@ -36,6 +36,20 @@ DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0) /* SPI_D1 */
>;
};
mmc_pins: pinmux_mmc_pins {
pinctrl-single,pins = <
DM816X_IOPAD(0x0a70, MUX_MODE0) /* SD_POW */
DM816X_IOPAD(0x0a74, MUX_MODE0) /* SD_CLK */
DM816X_IOPAD(0x0a78, MUX_MODE0) /* SD_CMD */
DM816X_IOPAD(0x0a7C, MUX_MODE0) /* SD_DAT0 */
DM816X_IOPAD(0x0a80, MUX_MODE0) /* SD_DAT1 */
DM816X_IOPAD(0x0a84, MUX_MODE0) /* SD_DAT2 */
DM816X_IOPAD(0x0a88, MUX_MODE0) /* SD_DAT2 */
DM816X_IOPAD(0x0a8c, MUX_MODE2) /* GP1[7] */
DM816X_IOPAD(0x0a90, MUX_MODE2) /* GP1[8] */
>;
};
usb0_pins: pinmux_usb0_pins {
pinctrl-single,pins = <
DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */
@ -137,7 +151,12 @@ m25p80@0 {
};
&mmc1 {
pinctrl-names = "default";
pinctrl-0 = <&mmc_pins>;
vmmc-supply = <&vmmcsd_fixed>;
bus-width = <4>;
cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
};
/* At least dm8168-evm rev c won't support multipoint, later may */


@ -150,17 +150,27 @@ elm: elm@48080000 {
};
gpio1: gpio@48032000 {
compatible = "ti,omap3-gpio";
compatible = "ti,omap4-gpio";
ti,hwmods = "gpio1";
ti,gpio-always-on;
reg = <0x48032000 0x1000>;
interrupts = <97>;
interrupts = <96>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
gpio2: gpio@4804c000 {
compatible = "ti,omap3-gpio";
compatible = "ti,omap4-gpio";
ti,hwmods = "gpio2";
ti,gpio-always-on;
reg = <0x4804c000 0x1000>;
interrupts = <99>;
interrupts = <98>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
};
gpmc: gpmc@50000000 {


@ -1111,7 +1111,6 @@ pcie1_phy: pciephy@4a094000 {
"wkupclk", "refclk",
"div-clk", "phy-div";
#phy-cells = <0>;
ti,hwmods = "pcie1-phy";
};
pcie2_phy: pciephy@4a095000 {
@ -1130,7 +1129,6 @@ pcie2_phy: pciephy@4a095000 {
"wkupclk", "refclk",
"div-clk", "phy-div";
#phy-cells = <0>;
ti,hwmods = "pcie2-phy";
status = "disabled";
};
};


@ -92,6 +92,8 @@ aes: aes@480c5000 {
ti,hwmods = "aes";
reg = <0x480c5000 0x50>;
interrupts = <0>;
dmas = <&sdma 65 &sdma 66>;
dma-names = "tx", "rx";
};
prm: prm@48306000 {
@ -550,6 +552,8 @@ sham: sham@480c3000 {
ti,hwmods = "sham";
reg = <0x480c3000 0x64>;
interrupts = <49>;
dmas = <&sdma 69>;
dma-names = "rx";
};
smartreflex_core: smartreflex@480cb000 {


@ -411,6 +411,7 @@ gmac: ethernet@ff290000 {
"mac_clk_rx", "mac_clk_tx",
"clk_mac_ref", "clk_mac_refout",
"aclk_mac", "pclk_mac";
status = "disabled";
};
usb_host0_ehci: usb@ff500000 {


@ -660,7 +660,7 @@ spi1: spi@fff01000 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0xfff01000 0x1000>;
interrupts = <0 156 4>;
interrupts = <0 155 4>;
num-cs = <4>;
clocks = <&spi_m_clk>;
status = "disabled";


@ -56,6 +56,22 @@ / {
model = "Olimex A10-OLinuXino-LIME";
compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";
cpus {
cpu0: cpu@0 {
/*
* The A10-Lime is known to be unstable
* when running at 1008 MHz
*/
operating-points = <
/* kHz uV */
912000 1350000
864000 1300000
624000 1250000
>;
cooling-max-level = <2>;
};
};
soc@01c00000 {
emac: ethernet@01c0b000 {
pinctrl-names = "default";


@ -75,7 +75,6 @@ cpu0: cpu@0 {
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
1056000 1500000
1008000 1400000
912000 1350000
864000 1300000
@ -83,7 +82,7 @@ cpu0: cpu@0 {
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
cooling-max-level = <4>;
cooling-max-level = <3>;
};
};


@ -47,7 +47,6 @@ cpu0: cpu@0 {
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
1104000 1500000
1008000 1400000
912000 1350000
864000 1300000
@ -57,7 +56,7 @@ cpu0: cpu@0 {
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
cooling-max-level = <6>;
cooling-max-level = <5>;
};
};


@ -105,7 +105,6 @@ cpu0: cpu@0 {
clock-latency = <244144>; /* 8 32k periods */
operating-points = <
/* kHz uV */
1008000 1450000
960000 1400000
912000 1400000
864000 1300000
@ -116,7 +115,7 @@ cpu0: cpu@0 {
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
cooling-max-level = <7>;
cooling-max-level = <6>;
};
cpu@1 {


@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
return kasprintf(GFP_KERNEL, "OMAP4");
else if (soc_is_omap54xx())
return kasprintf(GFP_KERNEL, "OMAP5");
else if (soc_is_am33xx() || soc_is_am335x())
return kasprintf(GFP_KERNEL, "AM33xx");
else if (soc_is_am43xx())
return kasprintf(GFP_KERNEL, "AM43xx");
else if (soc_is_dra7xx())


@ -11,6 +11,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@ -40,7 +41,6 @@
#define ICHP_VAL_IRQ (1 << 31)
#define ICHP_IRQ(i) (((i) >> 16) & 0x7fff)
#define IPR_VALID (1 << 31)
#define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f)
#define MAX_INTERNAL_IRQS 128
@ -51,6 +51,7 @@
static void __iomem *pxa_irq_base;
static int pxa_internal_irq_nr;
static bool cpu_has_ipr;
static struct irq_domain *pxa_irq_domain;
static inline void __iomem *irq_base(int i)
{
@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
void pxa_mask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
irq_hw_number_t irq = irqd_to_hwirq(d);
uint32_t icmr = __raw_readl(base + ICMR);
icmr &= ~(1 << IRQ_BIT(d->irq));
icmr &= ~BIT(irq & 0x1f);
__raw_writel(icmr, base + ICMR);
}
void pxa_unmask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
irq_hw_number_t irq = irqd_to_hwirq(d);
uint32_t icmr = __raw_readl(base + ICMR);
icmr |= 1 << IRQ_BIT(d->irq);
icmr |= BIT(irq & 0x1f);
__raw_writel(icmr, base + ICMR);
}
@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
} while (1);
}
void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
int irq, i, n;
void __iomem *base = irq_base(hw / 32);
BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
/* initialize interrupt priority */
if (cpu_has_ipr)
__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
handle_level_irq);
irq_set_chip_data(virq, base);
set_irq_flags(virq, IRQF_VALID);
return 0;
}
static struct irq_domain_ops pxa_irq_ops = {
.map = pxa_irq_map,
.xlate = irq_domain_xlate_onecell,
};
static __init void
pxa_init_irq_common(struct device_node *node, int irq_nr,
int (*fn)(struct irq_data *, unsigned int))
{
int n;
pxa_internal_irq_nr = irq_nr;
cpu_has_ipr = !cpu_is_pxa25x();
pxa_irq_base = io_p2v(0x40d00000);
pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
PXA_IRQ(0), 0,
&pxa_irq_ops, NULL);
if (!pxa_irq_domain)
panic("Unable to add PXA IRQ domain\n");
irq_set_default_host(pxa_irq_domain);
for (n = 0; n < irq_nr; n += 32) {
void __iomem *base = irq_base(n >> 5);
__raw_writel(0, base + ICMR); /* disable all IRQs */
__raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
/* initialize interrupt priority */
if (cpu_has_ipr)
__raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
irq = PXA_IRQ(i);
irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
handle_level_irq);
irq_set_chip_data(irq, base);
set_irq_flags(irq, IRQF_VALID);
}
}
/* only unmasked interrupts kick us out of idle */
__raw_writel(1, irq_base(0) + ICCR);
pxa_internal_irq_chip.irq_set_wake = fn;
}
void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
{
BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
pxa_irq_base = io_p2v(0x40d00000);
cpu_has_ipr = !cpu_is_pxa25x();
pxa_init_irq_common(NULL, irq_nr, fn);
}
#ifdef CONFIG_PM
static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
};
#ifdef CONFIG_OF
static struct irq_domain *pxa_irq_domain;
static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
void __iomem *base = irq_base(hw / 32);
/* initialize interrupt priority */
if (cpu_has_ipr)
__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
handle_level_irq);
irq_set_chip_data(hw, base);
set_irq_flags(hw, IRQF_VALID);
return 0;
}
static struct irq_domain_ops pxa_irq_ops = {
.map = pxa_irq_map,
.xlate = irq_domain_xlate_onecell,
};
static const struct of_device_id intc_ids[] __initconst = {
{ .compatible = "marvell,pxa-intc", },
{}
@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
{
struct device_node *node;
struct resource res;
int n, ret;
int ret;
node = of_find_matching_node(NULL, intc_ids);
if (!node) {
@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
return;
}
pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0,
&pxa_irq_ops, NULL);
if (!pxa_irq_domain)
panic("Unable to add PXA IRQ domain\n");
irq_set_default_host(pxa_irq_domain);
for (n = 0; n < pxa_internal_irq_nr; n += 32) {
void __iomem *base = irq_base(n >> 5);
__raw_writel(0, base + ICMR); /* disable all IRQs */
__raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
}
/* only unmasked interrupts kick us out of idle */
__raw_writel(1, irq_base(0) + ICCR);
pxa_internal_irq_chip.irq_set_wake = fn;
pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
}
#endif /* CONFIG_OF */


@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
};
static struct platform_device can_regulator_device = {
.name = "reg-fixed-volage",
.name = "reg-fixed-voltage",
.id = 0,
.dev = {
.platform_data = &can_regulator_pdata,


@ -1,10 +1,12 @@
menuconfig ARCH_SUNXI
bool "Allwinner SoCs" if ARCH_MULTI_V7
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_RESET_CONTROLLER
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select PINCTRL
select SUN4I_TIMER
select RESET_CONTROLLER
if ARCH_SUNXI
@ -20,10 +22,8 @@ config MACH_SUN5I
config MACH_SUN6I
bool "Allwinner A31 (sun6i) SoCs support"
default ARCH_SUNXI
select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select MFD_SUN6I_PRCM
select RESET_CONTROLLER
select SUN5I_HSTIMER
config MACH_SUN7I
@ -37,16 +37,12 @@ config MACH_SUN7I
config MACH_SUN8I
bool "Allwinner A23 (sun8i) SoCs support"
default ARCH_SUNXI
select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select MFD_SUN6I_PRCM
select RESET_CONTROLLER
config MACH_SUN9I
bool "Allwinner (sun9i) SoCs support"
default ARCH_SUNXI
select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select RESET_CONTROLLER
endif


@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct of_device_id *match;
const struct dmtimer_platform_data *pdata;
int ret;
match = of_match_device(of_match_ptr(omap_timer_match), dev);
pdata = match ? match->data : dev->platform_data;
@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
}
if (!timer->reserved) {
pm_runtime_get_sync(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
__func__);
goto err_get_sync;
}
__omap_dm_timer_init_regs(timer);
pm_runtime_put(dev);
}
@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
dev_dbg(dev, "Device Probed.\n");
return 0;
err_get_sync:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
/**
@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
}
spin_unlock_irqrestore(&dm_timer_lock, flags);
pm_runtime_disable(&pdev->dev);
return ret;
}


@ -8,7 +8,7 @@
*/
/* SoC fixed clocks */
soc_uartclk: refclk72738khz {
soc_uartclk: refclk7273800hz {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <7273800>;


@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
__ret; \
})
#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
typeof(*raw_cpu_ptr(&(pcp))) __ret; \
preempt_disable(); \
__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
preempt_enable(); \
__ret; \
})
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
o1, o2, n1, n2)
#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
({ \
int __ret; \
preempt_disable(); \
__ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \
raw_cpu_ptr(&(ptr2)), \
o1, o2, n1, n2); \
preempt_enable(); \
__ret; \
})
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))


@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
unsigned int cpu = smp_processor_id();
/*
* init_mm.pgd does not contain any user mappings and it is always
* active for kernel addresses in TTBR1. Just set the reserved TTBR0.
*/
if (next == &init_mm) {
cpu_set_reserved_ttbr0();
return;
}
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
check_and_switch_context(next, tsk);
}


@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
return ret;
}
#define _percpu_add(pcp, val) \
__percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
#define _percpu_read(pcp) \
({ \
typeof(pcp) __retval; \
preempt_disable(); \
__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \
sizeof(pcp)); \
preempt_enable(); \
__retval; \
})
#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
#define _percpu_write(pcp, val) \
do { \
preempt_disable(); \
__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \
sizeof(pcp)); \
preempt_enable(); \
} while(0) \
#define _pcp_protect(operation, pcp, val) \
({ \
typeof(pcp) __retval; \
preempt_disable(); \
__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
(val), sizeof(pcp)); \
preempt_enable(); \
__retval; \
})
#define _percpu_add(pcp, val) \
_pcp_protect(__percpu_add, pcp, val)
#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
#define _percpu_and(pcp, val) \
__percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
_pcp_protect(__percpu_and, pcp, val)
#define _percpu_or(pcp, val) \
__percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
#define _percpu_read(pcp) (typeof(pcp)) \
(__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
#define _percpu_write(pcp, val) \
__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
_pcp_protect(__percpu_or, pcp, val)
#define _percpu_xchg(pcp, val) (typeof(pcp)) \
(__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)


@ -2,6 +2,7 @@
#define _ASM_METAG_IO_H
#include <linux/types.h>
#include <asm/pgtable-bits.h>
#define IO_SPACE_LIMIT 0


@ -0,0 +1,104 @@
/*
* Meta page table definitions.
*/
#ifndef _METAG_PGTABLE_BITS_H
#define _METAG_PGTABLE_BITS_H
#include <asm/metag_mem.h>
/*
* Definitions for MMU descriptors
*
* These are the hardware bits in the MMCU pte entries.
* Derived from the Meta toolkit headers.
*/
#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
/* Write combine bit - this can cause writes to occur out of order */
#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
/* Sys coherent bit - this bit is never used by Linux */
#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
#define _PAGE_ALWAYS_ZERO_1 0x020
#define _PAGE_CACHE_CTRL0 0x040
#define _PAGE_CACHE_CTRL1 0x080
#define _PAGE_ALWAYS_ZERO_2 0x100
#define _PAGE_ALWAYS_ZERO_3 0x200
#define _PAGE_ALWAYS_ZERO_4 0x400
#define _PAGE_ALWAYS_ZERO_5 0x800
/* These are software bits that we stuff into the gaps in the hardware
* pte entries that are not used. Note, these DO get stored in the actual
* hardware, but the hardware just does not use them.
*/
#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
/* Pages owned, and protected by, the kernel. */
#define _PAGE_KERNEL _PAGE_PRIV
/* No cacheing of this page */
#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
/* burst cacheing - good for data streaming */
#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
/* One cache way per thread */
#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
/* Full on cacheing */
#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
/* which bits are used for cache control ... */
#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
_PAGE_WR_COMBINE)
/* This is a mask of the bits that pte_modify is allowed to change. */
#define _PAGE_CHG_MASK (PAGE_MASK)
#define _PAGE_SZ_SHIFT 1
#define _PAGE_SZ_4K (0x0)
#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
#if defined(CONFIG_PAGE_SIZE_4K)
#define _PAGE_SZ (_PAGE_SZ_4K)
#elif defined(CONFIG_PAGE_SIZE_8K)
#define _PAGE_SZ (_PAGE_SZ_8K)
#elif defined(CONFIG_PAGE_SIZE_16K)
#define _PAGE_SZ (_PAGE_SZ_16K)
#endif
#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
# define _PAGE_SZHUGE (_PAGE_SZ_8K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
# define _PAGE_SZHUGE (_PAGE_SZ_16K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
# define _PAGE_SZHUGE (_PAGE_SZ_32K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
# define _PAGE_SZHUGE (_PAGE_SZ_64K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
# define _PAGE_SZHUGE (_PAGE_SZ_128K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
# define _PAGE_SZHUGE (_PAGE_SZ_256K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
# define _PAGE_SZHUGE (_PAGE_SZ_512K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
# define _PAGE_SZHUGE (_PAGE_SZ_1M)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
# define _PAGE_SZHUGE (_PAGE_SZ_2M)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
# define _PAGE_SZHUGE (_PAGE_SZ_4M)
#endif
#endif /* _METAG_PGTABLE_BITS_H */


@ -5,6 +5,7 @@
#ifndef _METAG_PGTABLE_H
#define _METAG_PGTABLE_H
#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>
/* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
@ -20,100 +21,6 @@
#define VMALLOC_END 0x7FFFFFFF
#endif
/*
* Definitions for MMU descriptors
*
* These are the hardware bits in the MMCU pte entries.
* Derived from the Meta toolkit headers.
*/
#define _PAGE_PRESENT MMCU_ENTRY_VAL_BIT
#define _PAGE_WRITE MMCU_ENTRY_WR_BIT
#define _PAGE_PRIV MMCU_ENTRY_PRIV_BIT
/* Write combine bit - this can cause writes to occur out of order */
#define _PAGE_WR_COMBINE MMCU_ENTRY_WRC_BIT
/* Sys coherent bit - this bit is never used by Linux */
#define _PAGE_SYS_COHERENT MMCU_ENTRY_SYS_BIT
#define _PAGE_ALWAYS_ZERO_1 0x020
#define _PAGE_CACHE_CTRL0 0x040
#define _PAGE_CACHE_CTRL1 0x080
#define _PAGE_ALWAYS_ZERO_2 0x100
#define _PAGE_ALWAYS_ZERO_3 0x200
#define _PAGE_ALWAYS_ZERO_4 0x400
#define _PAGE_ALWAYS_ZERO_5 0x800
/* These are software bits that we stuff into the gaps in the hardware
* pte entries that are not used. Note, these DO get stored in the actual
* hardware, but the hardware just does not use them.
*/
#define _PAGE_ACCESSED _PAGE_ALWAYS_ZERO_1
#define _PAGE_DIRTY _PAGE_ALWAYS_ZERO_2
/* Pages owned, and protected by, the kernel. */
#define _PAGE_KERNEL _PAGE_PRIV
/* No cacheing of this page */
#define _PAGE_CACHE_WIN0 (MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
/* burst cacheing - good for data streaming */
#define _PAGE_CACHE_WIN1 (MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
/* One cache way per thread */
#define _PAGE_CACHE_WIN2 (MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
/* Full on cacheing */
#define _PAGE_CACHE_WIN3 (MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
#define _PAGE_CACHEABLE (_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
/* which bits are used for cache control ... */
#define _PAGE_CACHE_MASK (_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
_PAGE_WR_COMBINE)
/* This is a mask of the bits that pte_modify is allowed to change. */
#define _PAGE_CHG_MASK (PAGE_MASK)
#define _PAGE_SZ_SHIFT 1
#define _PAGE_SZ_4K (0x0)
#define _PAGE_SZ_8K (0x1 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_16K (0x2 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_32K (0x3 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_64K (0x4 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_128K (0x5 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_256K (0x6 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_512K (0x7 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_1M (0x8 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_2M (0x9 << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_4M (0xa << _PAGE_SZ_SHIFT)
#define _PAGE_SZ_MASK (0xf << _PAGE_SZ_SHIFT)
#if defined(CONFIG_PAGE_SIZE_4K)
#define _PAGE_SZ (_PAGE_SZ_4K)
#elif defined(CONFIG_PAGE_SIZE_8K)
#define _PAGE_SZ (_PAGE_SZ_8K)
#elif defined(CONFIG_PAGE_SIZE_16K)
#define _PAGE_SZ (_PAGE_SZ_16K)
#endif
#define _PAGE_TABLE (_PAGE_SZ | _PAGE_PRESENT)
#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
# define _PAGE_SZHUGE (_PAGE_SZ_8K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
# define _PAGE_SZHUGE (_PAGE_SZ_16K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
# define _PAGE_SZHUGE (_PAGE_SZ_32K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
# define _PAGE_SZHUGE (_PAGE_SZ_64K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
# define _PAGE_SZHUGE (_PAGE_SZ_128K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
# define _PAGE_SZHUGE (_PAGE_SZ_256K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
# define _PAGE_SZHUGE (_PAGE_SZ_512K)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
# define _PAGE_SZHUGE (_PAGE_SZ_1M)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
# define _PAGE_SZHUGE (_PAGE_SZ_2M)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
# define _PAGE_SZHUGE (_PAGE_SZ_4M)
#endif
/*
* The Linux memory management assumes a three-level page table setup. On
* Meta, we use that, but "fold" the mid level into the top-level page


@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
if (likely(pgd != NULL)) {
memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
#ifdef CONFIG_64BIT
#if PT_NLEVELS == 3
actual_pgd += PTRS_PER_PGD;
/* Populate first pmd with allocated memory. We mark it
* with PxD_FLAG_ATTACHED as a signal to the system that this
@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_64BIT
#if PT_NLEVELS == 3
pgd -= PTRS_PER_PGD;
#endif
free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
#ifdef CONFIG_64BIT
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
/* This is the permanent pmd attached to the pgd;
* cannot free it */
/*
* This is the permanent pmd attached to the pgd;
* cannot free it.
* Increment the counter to compensate for the decrement
* done by generic mm code.
*/
mm_inc_nr_pmds(mm);
return;
#endif
free_pages((unsigned long)pmd, PMD_ORDER);
}
@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifdef CONFIG_64BIT
#if PT_NLEVELS == 3
/* preserve the gateway marker if this is the beginning of
* the permanent pmd */
if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)


@ -55,8 +55,8 @@
#define ENTRY_COMP(_name_) .word sys_##_name_
#endif
ENTRY_SAME(restart_syscall) /* 0 */
ENTRY_SAME(exit)
90: ENTRY_SAME(restart_syscall) /* 0 */
91: ENTRY_SAME(exit)
ENTRY_SAME(fork_wrapper)
ENTRY_SAME(read)
ENTRY_SAME(write)
@ -439,7 +439,10 @@
ENTRY_SAME(bpf)
ENTRY_COMP(execveat)
/* Nothing yet */
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
.error "size of syscall table does not fit value of __NR_Linux_syscalls"
.endif
#undef ENTRY_SAME
#undef ENTRY_DIFF


@ -153,6 +153,7 @@
#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
#define PPC_INST_MFTMR 0x7c0002dc
#define PPC_INST_MSGSND 0x7c00019c
#define PPC_INST_MSGCLR 0x7c0001dc
#define PPC_INST_MSGSNDP 0x7c00011c
#define PPC_INST_MTTMR 0x7c0003dc
#define PPC_INST_NOP 0x60000000
@ -309,6 +310,8 @@
___PPC_RB(b) | __PPC_EH(eh))
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
___PPC_RB(b))
#define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \
___PPC_RB(b))
#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
___PPC_RB(b))
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \


@ -608,13 +608,16 @@
#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
#define SRR1_WAKESYSERR 0x00300000 /* System error */
#define SRR1_WAKEEE 0x00200000 /* External interrupt */
#define SRR1_WAKEMT 0x00280000 /* mtctrl */
#define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
#define SRR1_WAKEDBELL 0x00140000 /* Privileged doorbell on P8 */
#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
#define SRR1_WAKERESET 0x00100000 /* System reset */
#define SRR1_WAKEHDBELL 0x000c0000 /* Hypervisor doorbell on P8 */
#define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */
#define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained,
* may not be recoverable */


@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8NVL */
.pvr_mask = 0xffff0000,
.pvr_value = 0x004c0000,
.cpu_name = "POWER8NVL (raw)",
.cpu_features = CPU_FTRS_POWER8,
.cpu_user_features = COMMON_USER_POWER8,
.cpu_user_features2 = COMMON_USER2_POWER8,
.mmu_features = MMU_FTRS_POWER8,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power8",
.oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = __setup_cpu_power8,
.cpu_restore = __restore_cpu_power8,
.flush_tlb = __flush_tlb_power8,
.machine_check_early = __machine_check_early_realmode_p8,
.platform = "power8",
},
{ /* Power8 DD1: Does not support doorbell IPIs */
.pvr_mask = 0xffffff00,
.pvr_value = 0x004d0100,


@ -17,6 +17,7 @@
#include <asm/dbell.h>
#include <asm/irq_regs.h>
#include <asm/kvm_ppc.h>
#ifdef CONFIG_SMP
void doorbell_setup_this_cpu(void)
@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)
may_hard_irq_enable();
kvmppc_set_host_ipi(smp_processor_id(), 0);
__this_cpu_inc(irq_stat.doorbell_irqs);
smp_ipi_demux();


@ -1408,7 +1408,7 @@ machine_check_handle_early:
bne 9f /* continue in V mode if we are. */
5:
#ifdef CONFIG_KVM_BOOK3S_64_HV
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
* We are coming from kernel context. Check if we are coming from
* guest. if yes, then we can continue. We will fall through


@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
spin_lock(&vcpu->arch.vpa_update_lock);
lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
if (lppaca)
yield_count = lppaca->yield_count;
yield_count = be32_to_cpu(lppaca->yield_count);
spin_unlock(&vcpu->arch.vpa_update_lock);
return yield_count;
}
@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
bool preserve_top32)
{
struct kvm *kvm = vcpu->kvm;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 mask;
mutex_lock(&kvm->lock);
spin_lock(&vc->lock);
/*
* If ILE (interrupt little-endian) has changed, update the
* MSR_LE bit in the intr_msr for each vcpu in this vcore.
*/
if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *vcpu;
int i;
mutex_lock(&kvm->lock);
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.vcore != vc)
continue;
@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
else
vcpu->arch.intr_msr &= ~MSR_LE;
}
mutex_unlock(&kvm->lock);
}
/*
@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
mask &= 0xFFFFFFFF;
vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
mutex_unlock(&kvm->lock);
}
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,


@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Save HEIR (HV emulation assist reg) in emul_inst
if this is an HEI (HV emulation interrupt, e40) */
li r3,KVM_INST_FETCH_FAILED
stw r3,VCPU_LAST_INST(r9)
cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
bne 11f
mfspr r3,SPRN_HEIR


@ -33,6 +33,8 @@
#include <asm/runlatch.h>
#include <asm/code-patching.h>
#include <asm/dbell.h>
#include <asm/kvm_ppc.h>
#include <asm/ppc-opcode.h>
#include "powernv.h"
@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
unsigned long srr1;
unsigned long srr1, wmask;
u32 idle_states;
/* Standard hot unplug procedure */
@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
generic_set_cpu_dead(cpu);
smp_wmb();
wmask = SRR1_WAKEMASK;
if (cpu_has_feature(CPU_FTR_ARCH_207S))
wmask = SRR1_WAKEMASK_P8;
idle_states = pnv_get_supported_cpuidle_states();
/* We don't want to take decrementer interrupts while we are offline,
* so clear LPCR:PECE1. We keep PECE2 enabled.
@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
* having finished executing in a KVM guest, then srr1
* contains 0.
*/
if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
if ((srr1 & wmask) == SRR1_WAKEEE) {
icp_native_flush_interrupt();
local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
smp_mb();
} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
kvmppc_set_host_ipi(cpu, 0);
}
if (cpu_core_split_required())


@ -25,10 +25,10 @@
static struct kobject *mobility_kobj;
struct update_props_workarea {
u32 phandle;
u32 state;
u64 reserved;
u32 nprops;
__be32 phandle;
__be32 state;
__be64 reserved;
__be32 nprops;
} __packed;
#define NODE_ACTION_MASK 0xff000000
@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
return rc;
}
static int delete_dt_node(u32 phandle)
static int delete_dt_node(__be32 phandle)
{
struct device_node *dn;
dn = of_find_node_by_phandle(phandle);
dn = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!dn)
return -ENOENT;
@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
return 0;
}
static int update_dt_node(u32 phandle, s32 scope)
static int update_dt_node(__be32 phandle, s32 scope)
{
struct update_props_workarea *upwa;
struct device_node *dn;
@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
char *prop_data;
char *rtas_buf;
int update_properties_token;
u32 nprops;
u32 vd;
update_properties_token = rtas_token("ibm,update-properties");
@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
if (!rtas_buf)
return -ENOMEM;
dn = of_find_node_by_phandle(phandle);
dn = of_find_node_by_phandle(be32_to_cpu(phandle));
if (!dn) {
kfree(rtas_buf);
return -ENOENT;
@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
break;
prop_data = rtas_buf + sizeof(*upwa);
nprops = be32_to_cpu(upwa->nprops);
/* On the first call to ibm,update-properties for a node the
* the first property value descriptor contains an empty
@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
*/
if (*prop_data == 0) {
prop_data++;
vd = *(u32 *)prop_data;
vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += vd + sizeof(vd);
upwa->nprops--;
nprops--;
}
for (i = 0; i < upwa->nprops; i++) {
for (i = 0; i < nprops; i++) {
char *prop_name;
prop_name = prop_data;
prop_data += strlen(prop_name) + 1;
vd = *(u32 *)prop_data;
vd = be32_to_cpu(*(__be32 *)prop_data);
prop_data += sizeof(vd);
switch (vd) {
@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
return 0;
}
static int add_dt_node(u32 parent_phandle, u32 drc_index)
static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
{
struct device_node *dn;
struct device_node *parent_dn;
int rc;
parent_dn = of_find_node_by_phandle(parent_phandle);
parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
if (!parent_dn)
return -ENOENT;
@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
int pseries_devicetree_update(s32 scope)
{
char *rtas_buf;
u32 *data;
__be32 *data;
int update_nodes_token;
int rc;
@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
if (rc && rc != 1)
break;
data = (u32 *)rtas_buf + 4;
while (*data & NODE_ACTION_MASK) {
data = (__be32 *)rtas_buf + 4;
while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
int i;
u32 action = *data & NODE_ACTION_MASK;
int node_count = *data & NODE_COUNT_MASK;
u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
data++;
for (i = 0; i < node_count; i++) {
u32 phandle = *data++;
u32 drc_index;
__be32 phandle = *data++;
__be32 drc_index;
switch (action) {
case DELETE_DT_NODE:


@ -211,7 +211,7 @@ do { \
extern unsigned long mmap_rnd_mask;
#define STACK_RND_MASK (mmap_rnd_mask)
#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
#define ARCH_DLINFO \
do { \


@ -57,6 +57,44 @@
unsigned long ftrace_plt;
static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
{
#ifdef CC_USING_HOTPATCH
/* brcl 0,0 */
insn->opc = 0xc004;
insn->disp = 0;
#else
/* stg r14,8(r15) */
insn->opc = 0xe3e0;
insn->disp = 0xf0080024;
#endif
}
static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
if (insn->opc == BREAKPOINT_INSTRUCTION)
return 1;
#endif
return 0;
}
static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
insn->opc = BREAKPOINT_INSTRUCTION;
insn->disp = KPROBE_ON_FTRACE_NOP;
#endif
}
static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
insn->opc = BREAKPOINT_INSTRUCTION;
insn->disp = KPROBE_ON_FTRACE_CALL;
#endif
}
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
return -EFAULT;
if (addr == MCOUNT_ADDR) {
/* Initial code replacement */
#ifdef CC_USING_HOTPATCH
/* We expect to see brcl 0,0 */
ftrace_generate_nop_insn(&orig);
#else
/* We expect to see stg r14,8(r15) */
orig.opc = 0xe3e0;
orig.disp = 0xf0080024;
#endif
ftrace_generate_orig_insn(&orig);
ftrace_generate_nop_insn(&new);
} else if (old.opc == BREAKPOINT_INSTRUCTION) {
} else if (is_kprobe_on_ftrace(&old)) {
/*
* If we find a breakpoint instruction, a kprobe has been
* placed at the beginning of the function. We write the
@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
* bytes of the original instruction so that the kprobes
* handler can execute a nop, if it reaches this breakpoint.
*/
new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
orig.disp = KPROBE_ON_FTRACE_CALL;
new.disp = KPROBE_ON_FTRACE_NOP;
ftrace_generate_kprobe_call_insn(&orig);
ftrace_generate_kprobe_nop_insn(&new);
} else {
/* Replace ftrace call with a nop. */
ftrace_generate_call_insn(&orig, rec->ip);
@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
if (old.opc == BREAKPOINT_INSTRUCTION) {
if (is_kprobe_on_ftrace(&old)) {
/*
* If we find a breakpoint instruction, a kprobe has been
* placed at the beginning of the function. We write the
@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* bytes of the original instruction so that the kprobes
* handler can execute a brasl if it reaches this breakpoint.
*/
new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
orig.disp = KPROBE_ON_FTRACE_NOP;
new.disp = KPROBE_ON_FTRACE_CALL;
ftrace_generate_kprobe_nop_insn(&orig);
ftrace_generate_kprobe_call_insn(&new);
} else {
/* Replace nop with an ftrace call. */
ftrace_generate_nop_insn(&orig);


@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
static struct attribute *cpumsf_pmu_events_attr[] = {
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
NULL,
NULL,
};
@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
return -EINVAL;
}
if (si.ad)
if (si.ad) {
sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
cpumsf_pmu_events_attr[1] =
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
}
sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
if (!sfdbg)


@ -177,6 +177,17 @@ restart_entry:
lhi %r1,1
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
sam64
#ifdef CONFIG_SMP
larl %r1,smp_cpu_mt_shift
icm %r1,15,0(%r1)
jz smt_done
llgfr %r1,%r1
smt_loop:
sigp %r1,%r0,SIGP_SET_MULTI_THREADING
brc 8,smt_done /* accepted */
brc 2,smt_loop /* busy, try again */
smt_done:
#endif
larl %r1,.Lnew_pgm_check_psw
lpswe 0(%r1)
pgm_check_entry:


@ -2957,6 +2957,17 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
unsigned long reg_val);
#endif
#define HV_FAST_M7_GET_PERFREG 0x43
#define HV_FAST_M7_SET_PERFREG 0x44
#ifndef __ASSEMBLY__
unsigned long sun4v_m7_get_perfreg(unsigned long reg_num,
unsigned long *reg_val);
unsigned long sun4v_m7_set_perfreg(unsigned long reg_num,
unsigned long reg_val);
#endif
/* Function numbers for HV_CORE_TRAP. */
#define HV_CORE_SET_VER 0x00
#define HV_CORE_PUTCHAR 0x01
@ -2981,6 +2992,7 @@ unsigned long sun4v_t5_set_perfreg(unsigned long reg_num,
#define HV_GRP_SDIO 0x0108
#define HV_GRP_SDIO_ERR 0x0109
#define HV_GRP_REBOOT_DATA 0x0110
#define HV_GRP_M7_PERF 0x0114
#define HV_GRP_NIAG_PERF 0x0200
#define HV_GRP_FIRE_PERF 0x0201
#define HV_GRP_N2_CPU 0x0202


@ -48,6 +48,7 @@ static struct api_info api_table[] = {
{ .group = HV_GRP_VT_CPU, },
{ .group = HV_GRP_T5_CPU, },
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
{ .group = HV_GRP_M7_PERF, },
};
static DEFINE_SPINLOCK(hvapi_lock);


@ -837,3 +837,19 @@ ENTRY(sun4v_t5_set_perfreg)
retl
nop
ENDPROC(sun4v_t5_set_perfreg)
ENTRY(sun4v_m7_get_perfreg)
mov %o1, %o4
mov HV_FAST_M7_GET_PERFREG, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_m7_get_perfreg)
ENTRY(sun4v_m7_set_perfreg)
mov HV_FAST_M7_SET_PERFREG, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_m7_set_perfreg)


@ -217,6 +217,31 @@ static const struct pcr_ops n5_pcr_ops = {
.pcr_nmi_disable = PCR_N4_PICNPT,
};
static u64 m7_pcr_read(unsigned long reg_num)
{
unsigned long val;
(void) sun4v_m7_get_perfreg(reg_num, &val);
return val;
}
static void m7_pcr_write(unsigned long reg_num, u64 val)
{
(void) sun4v_m7_set_perfreg(reg_num, val);
}
static const struct pcr_ops m7_pcr_ops = {
.read_pcr = m7_pcr_read,
.write_pcr = m7_pcr_write,
.read_pic = n4_pic_read,
.write_pic = n4_pic_write,
.nmi_picl_value = n4_picl_value,
.pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE |
PCR_N4_UTRACE | PCR_N4_TOE |
(26 << PCR_N4_SL_SHIFT)),
.pcr_nmi_disable = PCR_N4_PICNPT,
};
static unsigned long perf_hsvc_group;
static unsigned long perf_hsvc_major;
@ -248,6 +273,10 @@ static int __init register_perf_hsvc(void)
perf_hsvc_group = HV_GRP_T5_CPU;
break;
case SUN4V_CHIP_SPARC_M7:
perf_hsvc_group = HV_GRP_M7_PERF;
break;
default:
return -ENODEV;
}
@ -293,6 +322,10 @@ static int __init setup_sun4v_pcr_ops(void)
pcr_ops = &n5_pcr_ops;
break;
case SUN4V_CHIP_SPARC_M7:
pcr_ops = &m7_pcr_ops;
break;
default:
ret = -ENODEV;
break;


@ -792,6 +792,42 @@ static const struct sparc_pmu niagara4_pmu = {
.num_pic_regs = 4,
};
static void sparc_m7_write_pmc(int idx, u64 val)
{
u64 pcr;
pcr = pcr_ops->read_pcr(idx);
/* ensure ov and ntc are reset */
pcr &= ~(PCR_N4_OV | PCR_N4_NTC);
pcr_ops->write_pic(idx, val & 0xffffffff);
pcr_ops->write_pcr(idx, pcr);
}
static const struct sparc_pmu sparc_m7_pmu = {
.event_map = niagara4_event_map,
.cache_map = &niagara4_cache_map,
.max_events = ARRAY_SIZE(niagara4_perfmon_event_map),
.read_pmc = sparc_vt_read_pmc,
.write_pmc = sparc_m7_write_pmc,
.upper_shift = 5,
.lower_shift = 5,
.event_mask = 0x7ff,
.user_bit = PCR_N4_UTRACE,
.priv_bit = PCR_N4_STRACE,
/* We explicitly don't support hypervisor tracing. */
.hv_bit = 0,
.irq_bit = PCR_N4_TOE,
.upper_nop = 0,
.lower_nop = 0,
.flags = 0,
.max_hw_events = 4,
.num_pcrs = 4,
.num_pic_regs = 4,
};
static const struct sparc_pmu *sparc_pmu __read_mostly;
static u64 event_encoding(u64 event_id, int idx)
@ -960,6 +996,8 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc)
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
}
static void sparc_pmu_start(struct perf_event *event, int flags);
/* On this PMU each PIC has it's own PCR control register. */
static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
{
@ -972,20 +1010,13 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
struct perf_event *cp = cpuc->event[i];
struct hw_perf_event *hwc = &cp->hw;
int idx = hwc->idx;
u64 enc;
if (cpuc->current_idx[i] != PIC_NO_INDEX)
continue;
sparc_perf_event_set_period(cp, hwc, idx);
cpuc->current_idx[i] = idx;
enc = perf_event_get_enc(cpuc->events[i]);
cpuc->pcr[idx] &= ~mask_for_index(idx);
if (hwc->state & PERF_HES_STOPPED)
cpuc->pcr[idx] |= nop_for_index(idx);
else
cpuc->pcr[idx] |= event_encoding(enc, idx);
sparc_pmu_start(cp, PERF_EF_RELOAD);
}
out:
for (i = 0; i < cpuc->n_events; i++) {
@ -1101,7 +1132,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
int i;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event[i]) {
@ -1127,7 +1157,6 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
}
}
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
@ -1361,7 +1390,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
unsigned long flags;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
n0 = cpuc->n_events;
if (n0 >= sparc_pmu->max_hw_events)
@ -1394,7 +1422,6 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
ret = 0;
out:
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
}
@ -1667,6 +1694,10 @@ static bool __init supported_pmu(void)
sparc_pmu = &niagara4_pmu;
return true;
}
if (!strcmp(sparc_pmu_type, "sparc-m7")) {
sparc_pmu = &sparc_m7_pmu;
return true;
}
return false;
}


@ -287,6 +287,8 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
gp->tpc, gp->o7, gp->i7, gp->rpc);
}
touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
@ -362,6 +364,8 @@ static void pmu_snapshot_all_cpus(void)
(cpu == this_cpu ? '*' : ' '), cpu,
pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
touch_nmi_watchdog();
}
memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));


@ -8,9 +8,11 @@
.text
ENTRY(memmove) /* o0=dst o1=src o2=len */
mov %o0, %g1
brz,pn %o2, 99f
mov %o0, %g1
cmp %o0, %o1
bleu,pt %xcc, memcpy
bleu,pt %xcc, 2f
add %o1, %o2, %g7
cmp %g7, %o0
bleu,pt %xcc, memcpy
@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len */
stb %g7, [%o0]
bne,pt %icc, 1b
sub %o0, 1, %o0
99:
retl
mov %g1, %o0
/* We can't just call memcpy for these memmove cases. On some
* chips the memcpy uses cache initializing stores and when dst
* and src are close enough, those can clobber the source data
* before we've loaded it in.
*/
2: or %o0, %o1, %g7
or %o2, %g7, %g7
andcc %g7, 0x7, %g0
bne,pn %xcc, 4f
nop
3: ldx [%o1], %g7
add %o1, 8, %o1
subcc %o2, 8, %o2
add %o0, 8, %o0
bne,pt %icc, 3b
stx %g7, [%o0 - 0x8]
ba,a,pt %xcc, 99b
4: ldub [%o1], %g7
add %o1, 1, %o1
subcc %o2, 1, %o2
add %o0, 1, %o0
bne,pt %icc, 4b
stb %g7, [%o0 - 0x1]
ba,a,pt %xcc, 99b
ENDPROC(memmove)


@ -364,12 +364,21 @@ system_call_fastpath:
* Has incomplete stack frame and undefined top of stack.
*/
ret_from_sys_call:
testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz int_ret_from_sys_call_fixup /* Go the the slow path */
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
/*
* We must check ti flags with interrupts (or at least preemption)
* off because we must *never* return to userspace without
* processing exit work that is enqueued if we're preempted here.
* In particular, returning to userspace with any of the one-shot
* flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
* very bad.
*/
testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
jnz int_ret_from_sys_call_fixup /* Go the the slow path */
CFI_REMEMBER_STATE
/*
* sysretq will re-enable interrupts:
@ -386,7 +395,7 @@ ret_from_sys_call:
int_ret_from_sys_call_fixup:
FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
jmp int_ret_from_sys_call
jmp int_ret_from_sys_call_irqs_off
/* Do syscall tracing */
tracesys:
@ -432,6 +441,7 @@ tracesys_phase2:
GLOBAL(int_ret_from_sys_call)
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
int_ret_from_sys_call_irqs_off:
movl $_TIF_ALLWORK_MASK,%edi
/* edi: mask to check */
GLOBAL(int_with_check)


@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
int i;
struct kvm_lapic *apic = vcpu->arch.apic;
for (i = 0; i < IOAPIC_NUM_PINS; i++) {
union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
spin_lock(&ioapic->lock);
if (trigger_mode != IOAPIC_LEVEL_TRIG)
if (trigger_mode != IOAPIC_LEVEL_TRIG ||
kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
continue;
ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);


@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
int trigger_mode;
if (apic_test_vector(vector, apic->regs + APIC_TMR))
trigger_mode = IOAPIC_LEVEL_TRIG;


@ -2479,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_UNRESTRICTED_GUEST;
SECONDARY_EXEC_ENABLE_EPT;
vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_INVEPT_BIT;
@ -2494,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
} else
vmx->nested.nested_vmx_ept_caps = 0;
if (enable_unrestricted_guest)
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_UNRESTRICTED_GUEST;
/* miscellaneous data */
rdmsr(MSR_IA32_VMX_MISC,
vmx->nested.nested_vmx_misc_low,


@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
struct bio_vec *bprev;
bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
return false;
}


@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
/*
* We're out of tags on this hardware queue, kick any
* pending IO submits before going to sleep waiting for
* some to complete.
* some to complete. Note that hctx can be NULL here for
* reserved tag allocation.
*/
blk_mq_run_hw_queue(hctx, false);
if (hctx)
blk_mq_run_hw_queue(hctx, false);
/*
* Retry tag allocation after running the hardware queue,


@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
*/
if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto err_map;
goto err_mq_usage;
setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
blk_queue_rq_timeout(q, 30000);
@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
if (blk_mq_init_hw_queues(q, set))
goto err_hw;
goto err_mq_usage;
mutex_lock(&all_q_mutex);
list_add_tail(&q->all_q_node, &all_q_list);
@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
return q;
err_hw:
err_mq_usage:
blk_cleanup_queue(q);
err_hctxs:
kfree(map);


@ -4737,7 +4737,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
return NULL;
/* libsas case */
if (!ap->scsi_host) {
if (ap->flags & ATA_FLAG_SAS_HOST) {
tag = ata_sas_allocate_tag(ap);
if (tag < 0)
return NULL;
@ -4776,7 +4776,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
tag = qc->tag;
if (likely(ata_tag_valid(tag))) {
qc->tag = ATA_TAG_POISON;
if (!ap->scsi_host)
if (ap->flags & ATA_FLAG_SAS_HOST)
ata_sas_free_tag(tag, ap);
}
}


@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_lzo_ops;
extern struct regcache_ops regcache_flat_ops;
static inline const char *regmap_name(const struct regmap *map)
{
if (map->dev)
return dev_name(map->dev);
return map->name;
}
#endif


@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
ret = map->cache_ops->read(map, reg, value);
if (ret == 0)
trace_regmap_reg_read_cache(map->dev, reg, *value);
trace_regmap_reg_read_cache(map, reg, *value);
return ret;
}
@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
dev_dbg(map->dev, "Syncing %s cache\n",
map->cache_ops->name);
name = map->cache_ops->name;
trace_regcache_sync(map->dev, name, "start");
trace_regcache_sync(map, name, "start");
if (!map->cache_dirty)
goto out;
@ -346,7 +346,7 @@ int regcache_sync(struct regmap *map)
regmap_async_complete(map);
trace_regcache_sync(map->dev, name, "stop");
trace_regcache_sync(map, name, "stop");
return ret;
}
@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
name = map->cache_ops->name;
dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
trace_regcache_sync(map->dev, name, "start region");
trace_regcache_sync(map, name, "start region");
if (!map->cache_dirty)
goto out;
@ -401,7 +401,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
regmap_async_complete(map);
trace_regcache_sync(map->dev, name, "stop region");
trace_regcache_sync(map, name, "stop region");
return ret;
}
@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
map->lock(map->lock_arg);
trace_regcache_drop_region(map->dev, min, max);
trace_regcache_drop_region(map, min, max);
ret = map->cache_ops->drop(map, min, max);
@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
map->lock(map->lock_arg);
WARN_ON(map->cache_bypass && enable);
map->cache_only = enable;
trace_regmap_cache_only(map->dev, enable);
trace_regmap_cache_only(map, enable);
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
map->lock(map->lock_arg);
WARN_ON(map->cache_only && enable);
map->cache_bypass = enable;
trace_regmap_cache_bypass(map->dev, enable);
trace_regmap_cache_bypass(map, enable);
map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);


@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
if (map->async && map->bus->async_write) {
struct regmap_async *async;
trace_regmap_async_write_start(map->dev, reg, val_len);
trace_regmap_async_write_start(map, reg, val_len);
spin_lock_irqsave(&map->async_lock, flags);
async = list_first_entry_or_null(&map->async_free,
@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
return ret;
}
trace_regmap_hw_write_start(map->dev, reg,
val_len / map->format.val_bytes);
trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
/* If we're doing a single register write we can probably just
* send the work_buf directly, otherwise try to do a gather
@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
kfree(buf);
}
trace_regmap_hw_write_done(map->dev, reg,
val_len / map->format.val_bytes);
trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
return ret;
}
@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
map->format.format_write(map, reg, val);
trace_regmap_hw_write_start(map->dev, reg, 1);
trace_regmap_hw_write_start(map, reg, 1);
ret = map->bus->write(map->bus_context, map->work_buf,
map->format.buf_size);
trace_regmap_hw_write_done(map->dev, reg, 1);
trace_regmap_hw_write_done(map, reg, 1);
return ret;
}
@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
dev_info(map->dev, "%x <= %x\n", reg, val);
#endif
trace_regmap_reg_write(map->dev, reg, val);
trace_regmap_reg_write(map, reg, val);
return map->reg_write(context, reg, val);
}
@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
for (i = 0; i < num_regs; i++) {
int reg = regs[i].reg;
int val = regs[i].def;
trace_regmap_hw_write_start(map->dev, reg, 1);
trace_regmap_hw_write_start(map, reg, 1);
map->format.format_reg(u8, reg, map->reg_shift);
u8 += reg_bytes + pad_bytes;
map->format.format_val(u8, val, 0);
@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
for (i = 0; i < num_regs; i++) {
int reg = regs[i].reg;
trace_regmap_hw_write_done(map->dev, reg, 1);
trace_regmap_hw_write_done(map, reg, 1);
}
return ret;
}
@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
*/
u8[0] |= map->read_flag_mask;
trace_regmap_hw_read_start(map->dev, reg,
val_len / map->format.val_bytes);
trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
ret = map->bus->read(map->bus_context, map->work_buf,
map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
trace_regmap_hw_read_done(map->dev, reg,
val_len / map->format.val_bytes);
trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
return ret;
}
@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
dev_info(map->dev, "%x => %x\n", reg, *val);
#endif
trace_regmap_reg_read(map->dev, reg, *val);
trace_regmap_reg_read(map, reg, *val);
if (!map->cache_bypass)
regcache_write(map, reg, *val);
@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
struct regmap *map = async->map;
bool wake;
trace_regmap_async_io_complete(map->dev);
trace_regmap_async_io_complete(map);
spin_lock(&map->async_lock);
list_move(&async->list, &map->async_free);
@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map)
if (!map->bus || !map->bus->async_write)
return 0;
trace_regmap_async_complete_start(map->dev);
trace_regmap_async_complete_start(map);
wait_event(map->async_waitq, regmap_async_is_done(map));
@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map)
map->async_ret = 0;
spin_unlock_irqrestore(&map->async_lock, flags);
trace_regmap_async_complete_done(map->dev);
trace_regmap_async_complete_done(map);
return ret;
}


@ -803,10 +803,6 @@ static int __init nbd_init(void)
return -EINVAL;
}
nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
if (!nbd_dev)
return -ENOMEM;
part_shift = 0;
if (max_part > 0) {
part_shift = fls(max_part);
@ -828,6 +824,10 @@ static int __init nbd_init(void)
if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;
nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
if (!nbd_dev)
return -ENOMEM;
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
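
The reordered hunks above make the nbd init path validate its module parameters before allocating nbd_dev, so the early -EINVAL returns can no longer leak the array. A compressed sketch of the resulting shape (illustrative; unrelated checks and the per-device loop are omitted):

static int __init example_nbd_init(void)
{
        int part_shift = 0;

        if (max_part < 0)
                return -EINVAL;                 /* nothing allocated yet */

        if (max_part > 0)
                part_shift = fls(max_part);

        if (nbds_max > 1UL << (MINORBITS - part_shift))
                return -EINVAL;                 /* still nothing to free */

        nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
        if (!nbd_dev)
                return -ENOMEM;                 /* only failure after alloc */

        /* per-device setup only runs once all early returns are behind us */
        return 0;
}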


@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
get_device(dev->device);
INIT_LIST_HEAD(&dev->node);
INIT_WORK(&dev->probe_work, nvme_async_probe);
schedule_work(&dev->probe_work);
return 0;
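
The one-line addition above follows the usual workqueue rule: a work_struct must be initialised before anything queues it, otherwise the workqueue core is handed an uninitialised callback and list head. A minimal sketch of the ordering (illustrative names, not the driver's):

struct example_dev {
        struct work_struct probe_work;
};

static void example_async_probe(struct work_struct *work)
{
        /* the long-running part of probe runs here, in process context */
}

static int example_probe(struct example_dev *dev)
{
        INIT_WORK(&dev->probe_work, example_async_probe);  /* must come first */
        schedule_work(&dev->probe_work);                    /* now safe to queue */
        return 0;
}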


@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
config SH_TIMER_CMT
bool "Renesas CMT timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
default SYS_SUPPORTS_SH_CMT
help
This enables build of a clocksource and clockevent driver for
@ -201,6 +202,7 @@ config SH_TIMER_CMT
config SH_TIMER_MTU2
bool "Renesas MTU2 timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
default SYS_SUPPORTS_SH_MTU2
help
This enables build of a clockevent driver for the Multi-Function
@ -210,6 +212,7 @@ config SH_TIMER_MTU2
config SH_TIMER_TMU
bool "Renesas TMU timer driver" if COMPILE_TEST
depends on GENERIC_CLOCKEVENTS
depends on HAS_IOMEM
default SYS_SUPPORTS_SH_TMU
help
This enables build of a clocksource and clockevent driver for


@ -17,7 +17,6 @@
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/reset.h>
#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
.dev_id = &sun5i_clockevent,
};
static u64 sun5i_timer_sched_read(void)
{
return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
}
static void __init sun5i_timer_init(struct device_node *node)
{
struct reset_control *rstc;
@ -172,7 +166,6 @@ static void __init sun5i_timer_init(struct device_node *node)
writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
timer_base + TIMER_CTL_REG(1));
sched_clock_register(sun5i_timer_sched_read, 32, rate);
clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
rate, 340, 32, clocksource_mmio_readl_down);


@ -525,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
}
EXPORT_SYMBOL(drm_framebuffer_reference);
static void drm_framebuffer_free_bug(struct kref *kref)
{
BUG();
}
static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
kref_put(&fb->refcount, drm_framebuffer_free_bug);
}
/**
* drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
* @fb: fb to unregister
@ -1320,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane)
return;
}
/* disconnect the plane from the fb and crtc: */
__drm_framebuffer_unreference(plane->old_fb);
drm_framebuffer_unreference(plane->old_fb);
plane->old_fb = NULL;
plane->fb = NULL;
plane->crtc = NULL;


@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
WARN_ON(i915_verify_lists(ring->dev));
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate,
* before we free the context associated with the requests.
/* Retire requests first as we use it above for the early return.
* If we retire requests last, we may use a later seqno and so clear
* the requests lists without clearing the active list, leading to
* confusion.
*/
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
if (!i915_gem_request_completed(obj->last_read_req, true))
break;
i915_gem_object_move_to_inactive(obj);
}
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
struct intel_ringbuffer *ringbuf;
@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
i915_gem_free_request(request);
}
/* Move any buffers on the active list that are no longer referenced
* by the ringbuffer to the flushing/inactive lists as appropriate,
* before we free the context associated with the requests.
*/
while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj;
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
if (!i915_gem_request_completed(obj->last_read_req, true))
break;
i915_gem_object_move_to_inactive(obj);
}
if (unlikely(ring->trace_irq_req &&
i915_gem_request_completed(ring->trace_irq_req, true))) {
ring->irq_put(ring);


@ -2438,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
if (!intel_crtc->base.primary->fb)
return;
if (intel_alloc_plane_obj(intel_crtc, plane_config))
if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
struct drm_plane *primary = intel_crtc->base.primary;
primary->state->crtc = &intel_crtc->base;
primary->crtc = &intel_crtc->base;
update_state_fb(primary);
return;
}
kfree(intel_crtc->base.primary->fb);
intel_crtc->base.primary->fb = NULL;
@ -2462,11 +2469,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
continue;
if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
struct drm_plane *primary = intel_crtc->base.primary;
if (obj->tiling_mode != I915_TILING_NONE)
dev_priv->preserve_bios_swizzle = true;
drm_framebuffer_reference(c->primary->fb);
intel_crtc->base.primary->fb = c->primary->fb;
primary->fb = c->primary->fb;
primary->state->crtc = &intel_crtc->base;
primary->crtc = &intel_crtc->base;
obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
break;
}
@ -6663,7 +6674,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size);
crtc->base.primary->fb = fb;
update_state_fb(crtc->base.primary);
}
static void chv_crtc_clock_get(struct intel_crtc *crtc,
@ -7704,7 +7714,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size);
crtc->base.primary->fb = fb;
update_state_fb(crtc->base.primary);
return;
error:
@ -7798,7 +7807,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size);
crtc->base.primary->fb = fb;
update_state_fb(crtc->base.primary);
}
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,


@ -33,6 +33,7 @@
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/dma-contiguous.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@ -126,6 +127,11 @@ static int __init alloc_passthrough_domain(void);
*
****************************************************************************/
static struct protection_domain *to_pdomain(struct iommu_domain *dom)
{
return container_of(dom, struct protection_domain, domain);
}
static struct iommu_dev_data *alloc_dev_data(u16 devid)
{
struct iommu_dev_data *dev_data;
@ -1321,7 +1327,9 @@ static u64 *alloc_pte(struct protection_domain *domain,
* This function checks if there is a PTE for a given dma address. If
* there is one, it returns the pointer to it.
*/
static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
static u64 *fetch_pte(struct protection_domain *domain,
unsigned long address,
unsigned long *page_size)
{
int level;
u64 *pte;
@ -1329,8 +1337,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
if (address > PM_LEVEL_SIZE(domain->mode))
return NULL;
level = domain->mode - 1;
pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
level = domain->mode - 1;
pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
*page_size = PTE_LEVEL_PAGE_SIZE(level);
while (level > 0) {
@ -1339,19 +1348,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
return NULL;
/* Large PTE */
if (PM_PTE_LEVEL(*pte) == 0x07) {
unsigned long pte_mask, __pte;
/*
* If we have a series of large PTEs, make
* sure to return a pointer to the first one.
*/
pte_mask = PTE_PAGE_SIZE(*pte);
pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
__pte = ((unsigned long)pte) & pte_mask;
return (u64 *)__pte;
}
if (PM_PTE_LEVEL(*pte) == 7 ||
PM_PTE_LEVEL(*pte) == 0)
break;
/* No level skipping support yet */
if (PM_PTE_LEVEL(*pte) != level)
@ -1360,8 +1359,21 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
level -= 1;
/* Walk to the next level */
pte = IOMMU_PTE_PAGE(*pte);
pte = &pte[PM_LEVEL_INDEX(level, address)];
pte = IOMMU_PTE_PAGE(*pte);
pte = &pte[PM_LEVEL_INDEX(level, address)];
*page_size = PTE_LEVEL_PAGE_SIZE(level);
}
if (PM_PTE_LEVEL(*pte) == 0x07) {
unsigned long pte_mask;
/*
* If we have a series of large PTEs, make
* sure to return a pointer to the first one.
*/
*page_size = pte_mask = PTE_PAGE_SIZE(*pte);
pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
pte = (u64 *)(((unsigned long)pte) & pte_mask);
}
return pte;
@ -1383,13 +1395,14 @@ static int iommu_map_page(struct protection_domain *dom,
u64 __pte, *pte;
int i, count;
BUG_ON(!IS_ALIGNED(bus_addr, page_size));
BUG_ON(!IS_ALIGNED(phys_addr, page_size));
if (!(prot & IOMMU_PROT_MASK))
return -EINVAL;
bus_addr = PAGE_ALIGN(bus_addr);
phys_addr = PAGE_ALIGN(phys_addr);
count = PAGE_SIZE_PTE_COUNT(page_size);
pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
count = PAGE_SIZE_PTE_COUNT(page_size);
pte = alloc_pte(dom, bus_addr, page_size, NULL, GFP_KERNEL);
if (!pte)
return -ENOMEM;
@ -1398,7 +1411,7 @@ static int iommu_map_page(struct protection_domain *dom,
if (IOMMU_PTE_PRESENT(pte[i]))
return -EBUSY;
if (page_size > PAGE_SIZE) {
if (count > 1) {
__pte = PAGE_SIZE_PTE(phys_addr, page_size);
__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_P | IOMMU_PTE_FC;
} else
@ -1421,7 +1434,8 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
unsigned long bus_addr,
unsigned long page_size)
{
unsigned long long unmap_size, unmapped;
unsigned long long unmapped;
unsigned long unmap_size;
u64 *pte;
BUG_ON(!is_power_of_2(page_size));
@ -1430,28 +1444,12 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
while (unmapped < page_size) {
pte = fetch_pte(dom, bus_addr);
pte = fetch_pte(dom, bus_addr, &unmap_size);
if (!pte) {
/*
* No PTE for this address
* move forward in 4kb steps
*/
unmap_size = PAGE_SIZE;
} else if (PM_PTE_LEVEL(*pte) == 0) {
/* 4kb PTE found for this address */
unmap_size = PAGE_SIZE;
*pte = 0ULL;
} else {
int count, i;
if (pte) {
int i, count;
/* Large PTE found which maps this address */
unmap_size = PTE_PAGE_SIZE(*pte);
/* Only unmap from the first pte in the page */
if ((unmap_size - 1) & bus_addr)
break;
count = PAGE_SIZE_PTE_COUNT(unmap_size);
count = PAGE_SIZE_PTE_COUNT(unmap_size);
for (i = 0; i < count; i++)
pte[i] = 0ULL;
}
@ -1599,7 +1597,7 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
{
int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
struct amd_iommu *iommu;
unsigned long i, old_size;
unsigned long i, old_size, pte_pgsize;
#ifdef CONFIG_IOMMU_STRESS
populate = false;
@ -1672,12 +1670,13 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
*/
for (i = dma_dom->aperture[index]->offset;
i < dma_dom->aperture_size;
i += PAGE_SIZE) {
u64 *pte = fetch_pte(&dma_dom->domain, i);
i += pte_pgsize) {
u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
continue;
dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT,
pte_pgsize >> 12);
}
update_domain(&dma_dom->domain);
@ -2422,16 +2421,6 @@ static int device_change_notifier(struct notifier_block *nb,
dev_data = get_dev_data(dev);
switch (action) {
case BUS_NOTIFY_UNBOUND_DRIVER:
domain = domain_for_device(dev);
if (!domain)
goto out;
if (dev_data->passthrough)
break;
detach_device(dev);
break;
case BUS_NOTIFY_ADD_DEVICE:
iommu_init_device(dev);
@ -2467,7 +2456,7 @@ static int device_change_notifier(struct notifier_block *nb,
dev->archdata.dma_ops = &amd_iommu_dma_ops;
break;
case BUS_NOTIFY_DEL_DEVICE:
case BUS_NOTIFY_REMOVED_DEVICE:
iommu_uninit_device(dev);
@ -2923,38 +2912,42 @@ static void *alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
struct dma_attrs *attrs)
{
unsigned long flags;
void *virt_addr;
struct protection_domain *domain;
phys_addr_t paddr;
u64 dma_mask = dev->coherent_dma_mask;
struct protection_domain *domain;
unsigned long flags;
struct page *page;
INC_STATS_COUNTER(cnt_alloc_coherent);
domain = get_domain(dev);
if (PTR_ERR(domain) == -EINVAL) {
virt_addr = (void *)__get_free_pages(flag, get_order(size));
*dma_addr = __pa(virt_addr);
return virt_addr;
page = alloc_pages(flag, get_order(size));
*dma_addr = page_to_phys(page);
return page_address(page);
} else if (IS_ERR(domain))
return NULL;
size = PAGE_ALIGN(size);
dma_mask = dev->coherent_dma_mask;
flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
flag |= __GFP_ZERO;
virt_addr = (void *)__get_free_pages(flag, get_order(size));
if (!virt_addr)
return NULL;
page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
if (!page) {
if (!(flag & __GFP_WAIT))
return NULL;
paddr = virt_to_phys(virt_addr);
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
get_order(size));
if (!page)
return NULL;
}
if (!dma_mask)
dma_mask = *dev->dma_mask;
spin_lock_irqsave(&domain->lock, flags);
*dma_addr = __map_single(dev, domain->priv, paddr,
*dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
size, DMA_BIDIRECTIONAL, true, dma_mask);
if (*dma_addr == DMA_ERROR_CODE) {
@ -2966,11 +2959,12 @@ static void *alloc_coherent(struct device *dev, size_t size,
spin_unlock_irqrestore(&domain->lock, flags);
return virt_addr;
return page_address(page);
out_free:
free_pages((unsigned long)virt_addr, get_order(size));
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
__free_pages(page, get_order(size));
return NULL;
}
@ -2982,11 +2976,15 @@ static void free_coherent(struct device *dev, size_t size,
void *virt_addr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
unsigned long flags;
struct protection_domain *domain;
unsigned long flags;
struct page *page;
INC_STATS_COUNTER(cnt_free_coherent);
page = virt_to_page(virt_addr);
size = PAGE_ALIGN(size);
domain = get_domain(dev);
if (IS_ERR(domain))
goto free_mem;
@ -3000,7 +2998,8 @@ static void free_coherent(struct device *dev, size_t size,
spin_unlock_irqrestore(&domain->lock, flags);
free_mem:
free_pages((unsigned long)virt_addr, get_order(size));
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
__free_pages(page, get_order(size));
}
/*
@ -3236,42 +3235,45 @@ static int __init alloc_passthrough_domain(void)
return 0;
}
static int amd_iommu_domain_init(struct iommu_domain *dom)
static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *pdomain;
/* We only support unmanaged domains for now */
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
pdomain = protection_domain_alloc();
if (!pdomain)
goto out_free;
pdomain->mode = PAGE_MODE_3_LEVEL;
pdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
if (!pdomain->pt_root)
goto out_free;
pdomain->domain.geometry.aperture_start = 0;
pdomain->domain.geometry.aperture_end = ~0ULL;
pdomain->domain.geometry.force_aperture = true;
return &pdomain->domain;
out_free:
protection_domain_free(pdomain);
return NULL;
}
static void amd_iommu_domain_free(struct iommu_domain *dom)
{
struct protection_domain *domain;
domain = protection_domain_alloc();
if (!domain)
goto out_free;
domain->mode = PAGE_MODE_3_LEVEL;
domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
if (!domain->pt_root)
goto out_free;
domain->iommu_domain = dom;
dom->priv = domain;
dom->geometry.aperture_start = 0;
dom->geometry.aperture_end = ~0ULL;
dom->geometry.force_aperture = true;
return 0;
out_free:
protection_domain_free(domain);
return -ENOMEM;
}
static void amd_iommu_domain_destroy(struct iommu_domain *dom)
{
struct protection_domain *domain = dom->priv;
if (!domain)
if (!dom)
return;
domain = to_pdomain(dom);
if (domain->dev_cnt > 0)
cleanup_domain(domain);
@ -3284,8 +3286,6 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
free_gcr3_table(domain);
protection_domain_free(domain);
dom->priv = NULL;
}
static void amd_iommu_detach_device(struct iommu_domain *dom,
@ -3313,7 +3313,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev)
{
struct protection_domain *domain = dom->priv;
struct protection_domain *domain = to_pdomain(dom);
struct iommu_dev_data *dev_data;
struct amd_iommu *iommu;
int ret;
@ -3340,7 +3340,7 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
phys_addr_t paddr, size_t page_size, int iommu_prot)
{
struct protection_domain *domain = dom->priv;
struct protection_domain *domain = to_pdomain(dom);
int prot = 0;
int ret;
@ -3362,7 +3362,7 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
size_t page_size)
{
struct protection_domain *domain = dom->priv;
struct protection_domain *domain = to_pdomain(dom);
size_t unmap_size;
if (domain->mode == PAGE_MODE_NONE)
@ -3380,28 +3380,22 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
dma_addr_t iova)
{
struct protection_domain *domain = dom->priv;
unsigned long offset_mask;
phys_addr_t paddr;
struct protection_domain *domain = to_pdomain(dom);
unsigned long offset_mask, pte_pgsize;
u64 *pte, __pte;
if (domain->mode == PAGE_MODE_NONE)
return iova;
pte = fetch_pte(domain, iova);
pte = fetch_pte(domain, iova, &pte_pgsize);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
return 0;
if (PM_PTE_LEVEL(*pte) == 0)
offset_mask = PAGE_SIZE - 1;
else
offset_mask = PTE_PAGE_SIZE(*pte) - 1;
offset_mask = pte_pgsize - 1;
__pte = *pte & PM_ADDR_MASK;
__pte = *pte & PM_ADDR_MASK;
paddr = (__pte & ~offset_mask) | (iova & offset_mask);
return paddr;
return (__pte & ~offset_mask) | (iova & offset_mask);
}
static bool amd_iommu_capable(enum iommu_cap cap)
@ -3420,8 +3414,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
static const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_init = amd_iommu_domain_init,
.domain_destroy = amd_iommu_domain_destroy,
.domain_alloc = amd_iommu_domain_alloc,
.domain_free = amd_iommu_domain_free,
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
@ -3483,7 +3477,7 @@ EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
void amd_iommu_domain_direct_map(struct iommu_domain *dom)
{
struct protection_domain *domain = dom->priv;
struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
spin_lock_irqsave(&domain->lock, flags);
@ -3504,7 +3498,7 @@ EXPORT_SYMBOL(amd_iommu_domain_direct_map);
int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
{
struct protection_domain *domain = dom->priv;
struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int levels, ret;
@ -3616,7 +3610,7 @@ static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
u64 address)
{
struct protection_domain *domain = dom->priv;
struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@ -3638,7 +3632,7 @@ static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
{
struct protection_domain *domain = dom->priv;
struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@ -3718,7 +3712,7 @@ static int __clear_gcr3(struct protection_domain *domain, int pasid)
int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
unsigned long cr3)
{
struct protection_domain *domain = dom->priv;
struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@ -3732,7 +3726,7 @@ EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
{
struct protection_domain *domain = dom->priv;
struct protection_domain *domain = to_pdomain(dom);
unsigned long flags;
int ret;
@ -3765,17 +3759,17 @@ EXPORT_SYMBOL(amd_iommu_complete_ppr);
struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
{
struct protection_domain *domain;
struct protection_domain *pdomain;
domain = get_domain(&pdev->dev);
if (IS_ERR(domain))
pdomain = get_domain(&pdev->dev);
if (IS_ERR(pdomain))
return NULL;
/* Only return IOMMUv2 domains */
if (!(domain->flags & PD_IOMMUV2_MASK))
if (!(pdomain->flags & PD_IOMMUV2_MASK))
return NULL;
return domain->iommu_domain;
return &pdomain->domain;
}
EXPORT_SYMBOL(amd_iommu_get_v2_domain);
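
Much of the AMD IOMMU rework above — and the SMMU, Exynos, PAMU and VT-d hunks that follow — replaces the old domain->priv pointer with a struct iommu_domain embedded inside the driver's private domain, recovered via container_of(). A self-contained userspace sketch of that embedding pattern; the type names are illustrative stand-ins, not the kernel definitions:

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct iommu_domain { int type; };              /* generic handle (stand-in) */

struct my_domain {
        int pgtable;                            /* driver-private state */
        struct iommu_domain domain;             /* embedded generic handle */
};

static struct iommu_domain *my_domain_alloc(void)
{
        struct my_domain *d = calloc(1, sizeof(*d));

        return d ? &d->domain : NULL;           /* hand out the embedded handle */
}

static void my_domain_free(struct iommu_domain *dom)
{
        if (!dom)
                return;
        free(container_of(dom, struct my_domain, domain));
}

int main(void)
{
        struct iommu_domain *dom = my_domain_alloc();

        my_domain_free(dom);
        return 0;
}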


@ -282,6 +282,12 @@
#define PTE_PAGE_SIZE(pte) \
(1ULL << (1 + ffz(((pte) | 0xfffULL))))
/*
* Takes a page-table level and returns the default page-size for this level
*/
#define PTE_LEVEL_PAGE_SIZE(level) \
(1ULL << (12 + (9 * (level))))
#define IOMMU_PTE_P (1ULL << 0)
#define IOMMU_PTE_TV (1ULL << 1)
#define IOMMU_PTE_U (1ULL << 59)
@ -400,6 +406,8 @@ struct iommu_domain;
struct protection_domain {
struct list_head list; /* for list of all protection domains */
struct list_head dev_list; /* List of all devices in this domain */
struct iommu_domain domain; /* generic domain handle used by
iommu core code */
spinlock_t lock; /* mostly used to lock the page table*/
struct mutex api_lock; /* protect page tables in the iommu-api path */
u16 id; /* the domain id written to the device table */
@ -411,10 +419,7 @@ struct protection_domain {
bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
void *priv; /* private data */
struct iommu_domain *iommu_domain; /* Pointer to generic
domain structure */
void *priv; /* private data */
};
/*

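
The PTE_LEVEL_PAGE_SIZE() macro above encodes that each page-table level covers nine more address bits on top of the 12-bit page offset; this is the size fetch_pte() now reports back through its new page_size argument. A standalone check of the values it produces (the macro is copied from the hunk, the rest is illustrative):

#include <stdio.h>

#define PTE_LEVEL_PAGE_SIZE(level)      (1ULL << (12 + (9 * (level))))

int main(void)
{
        printf("%llu\n", PTE_LEVEL_PAGE_SIZE(0));       /* 4096       = 4 KiB */
        printf("%llu\n", PTE_LEVEL_PAGE_SIZE(1));       /* 2097152    = 2 MiB */
        printf("%llu\n", PTE_LEVEL_PAGE_SIZE(2));       /* 1073741824 = 1 GiB */
        return 0;
}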

@ -417,7 +417,7 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
dev_state = pasid_state->device_state;
run_inv_ctx_cb = !pasid_state->invalid;
if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb)
if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
unbind_pasid(pasid_state);


@ -343,6 +343,7 @@ struct arm_smmu_domain {
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
struct mutex init_mutex; /* Protects smmu pointer */
struct iommu_domain domain;
};
static struct iommu_ops arm_smmu_ops;
@ -360,6 +361,11 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
{ 0, NULL},
};
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
return container_of(dom, struct arm_smmu_domain, domain);
}
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@ -645,7 +651,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
u32 fsr, far, fsynr, resume;
unsigned long iova;
struct iommu_domain *domain = dev;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *cb_base;
@ -730,6 +736,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
if (smmu->version > ARM_SMMU_V1) {
/*
* CBA2R.
* *Must* be initialised before CBAR thanks to a VMID16
* architectural oversight affecting some implementations.
*/
#ifdef CONFIG_64BIT
reg = CBA2R_RW64_64BIT;
#else
reg = CBA2R_RW64_32BIT;
#endif
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
}
/* CBAR */
reg = cfg->cbar;
if (smmu->version == ARM_SMMU_V1)
@ -747,16 +767,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
}
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
if (smmu->version > ARM_SMMU_V1) {
/* CBA2R */
#ifdef CONFIG_64BIT
reg = CBA2R_RW64_64BIT;
#else
reg = CBA2R_RW64_32BIT;
#endif
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
}
/* TTBRs */
if (stage1) {
reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
@ -836,7 +846,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct io_pgtable_ops *pgtbl_ops;
struct io_pgtable_cfg pgtbl_cfg;
enum io_pgtable_fmt fmt;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
mutex_lock(&smmu_domain->init_mutex);
@ -958,7 +968,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
void __iomem *cb_base;
@ -985,10 +995,12 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
static int arm_smmu_domain_init(struct iommu_domain *domain)
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
/*
* Allocate the domain and initialise some of its data structures.
* We can't really do anything meaningful until we've added a
@ -996,17 +1008,17 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
*/
smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
if (!smmu_domain)
return -ENOMEM;
return NULL;
mutex_init(&smmu_domain->init_mutex);
spin_lock_init(&smmu_domain->pgtbl_lock);
domain->priv = smmu_domain;
return 0;
return &smmu_domain->domain;
}
static void arm_smmu_domain_destroy(struct iommu_domain *domain)
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
/*
* Free the domain resources. We assume that all devices have
@ -1143,7 +1155,7 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu;
struct arm_smmu_master_cfg *cfg;
@ -1187,7 +1199,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_master_cfg *cfg;
cfg = find_smmu_master_cfg(dev);
@ -1203,7 +1215,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
{
int ret;
unsigned long flags;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
if (!ops)
@ -1220,7 +1232,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
{
size_t ret;
unsigned long flags;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
if (!ops)
@ -1235,7 +1247,7 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
dma_addr_t iova)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
@ -1281,7 +1293,7 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
{
phys_addr_t ret;
unsigned long flags;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
if (!ops)
@ -1329,61 +1341,83 @@ static void __arm_smmu_release_pci_iommudata(void *data)
kfree(data);
}
static int arm_smmu_add_device(struct device *dev)
static int arm_smmu_add_pci_device(struct pci_dev *pdev)
{
struct arm_smmu_device *smmu;
struct arm_smmu_master_cfg *cfg;
int i, ret;
u16 sid;
struct iommu_group *group;
void (*releasefn)(void *) = NULL;
int ret;
struct arm_smmu_master_cfg *cfg;
smmu = find_smmu_for_device(dev);
if (!smmu)
return -ENODEV;
group = iommu_group_alloc();
if (IS_ERR(group)) {
dev_err(dev, "Failed to allocate IOMMU group\n");
group = iommu_group_get_for_dev(&pdev->dev);
if (IS_ERR(group))
return PTR_ERR(group);
}
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);
cfg = iommu_group_get_iommudata(group);
if (!cfg) {
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg) {
ret = -ENOMEM;
goto out_put_group;
}
cfg->num_streamids = 1;
/*
* Assume Stream ID == Requester ID for now.
* We need a way to describe the ID mappings in FDT.
*/
pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid,
&cfg->streamids[0]);
releasefn = __arm_smmu_release_pci_iommudata;
} else {
struct arm_smmu_master *master;
master = find_smmu_master(smmu, dev->of_node);
if (!master) {
ret = -ENODEV;
goto out_put_group;
}
cfg = &master->cfg;
iommu_group_set_iommudata(group, cfg,
__arm_smmu_release_pci_iommudata);
}
iommu_group_set_iommudata(group, cfg, releasefn);
ret = iommu_group_add_device(group, dev);
if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) {
ret = -ENOSPC;
goto out_put_group;
}
/*
* Assume Stream ID == Requester ID for now.
* We need a way to describe the ID mappings in FDT.
*/
pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
for (i = 0; i < cfg->num_streamids; ++i)
if (cfg->streamids[i] == sid)
break;
/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
if (i == cfg->num_streamids)
cfg->streamids[cfg->num_streamids++] = sid;
return 0;
out_put_group:
iommu_group_put(group);
return ret;
}
static int arm_smmu_add_platform_device(struct device *dev)
{
struct iommu_group *group;
struct arm_smmu_master *master;
struct arm_smmu_device *smmu = find_smmu_for_device(dev);
if (!smmu)
return -ENODEV;
master = find_smmu_master(smmu, dev->of_node);
if (!master)
return -ENODEV;
/* No automatic group creation for platform devices */
group = iommu_group_alloc();
if (IS_ERR(group))
return PTR_ERR(group);
iommu_group_set_iommudata(group, &master->cfg, NULL);
return iommu_group_add_device(group, dev);
}
static int arm_smmu_add_device(struct device *dev)
{
if (dev_is_pci(dev))
return arm_smmu_add_pci_device(to_pci_dev(dev));
return arm_smmu_add_platform_device(dev);
}
static void arm_smmu_remove_device(struct device *dev)
{
iommu_group_remove_device(dev);
@ -1392,7 +1426,7 @@ static void arm_smmu_remove_device(struct device *dev)
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
switch (attr) {
case DOMAIN_ATTR_NESTING:
@ -1407,7 +1441,7 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
int ret = 0;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
mutex_lock(&smmu_domain->init_mutex);
@ -1435,8 +1469,8 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_init = arm_smmu_domain_init,
.domain_destroy = arm_smmu_domain_destroy,
.domain_alloc = arm_smmu_domain_alloc,
.domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
.detach_dev = arm_smmu_detach_dev,
.map = arm_smmu_map,
@ -1633,6 +1667,15 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
smmu->pa_size = size;
/*
* What the page table walker can address actually depends on which
* descriptor format is in use, but since a) we don't know that yet,
* and b) it can vary per context bank, this will have to do...
*/
if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
dev_warn(smmu->dev,
"failed to set DMA mask for table walker\n");
if (smmu->version == ARM_SMMU_V1) {
smmu->va_size = smmu->ipa_size;
size = SZ_4K | SZ_2M | SZ_1G;
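
Within the SMMU changes above, the new PCI path collects stream IDs into the group's shared master_cfg and skips any ID it has already recorded, since duplicate SIDs would later cause SMR conflicts. A small illustrative sketch of that de-duplicating append (not the driver code; the array and limit are stand-ins):

static int example_add_sid(u16 *sids, int *num, int max, u16 sid)
{
        int i;

        for (i = 0; i < *num; ++i)
                if (sids[i] == sid)
                        return 0;               /* already tracked */

        if (*num >= max)
                return -ENOSPC;                 /* table full, as above */

        sids[(*num)++] = sid;
        return 0;
}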


@ -200,6 +200,7 @@ struct exynos_iommu_domain {
short *lv2entcnt; /* free lv2 entry counter for each section */
spinlock_t lock; /* lock for this structure */
spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
struct iommu_domain domain; /* generic domain data structure */
};
struct sysmmu_drvdata {
@ -214,6 +215,11 @@ struct sysmmu_drvdata {
phys_addr_t pgtable;
};
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
return container_of(dom, struct exynos_iommu_domain, domain);
}
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
/* return true if the System MMU was not active previously
@ -696,58 +702,60 @@ static inline void pgtable_flush(void *vastart, void *vaend)
virt_to_phys(vaend));
}
static int exynos_iommu_domain_init(struct iommu_domain *domain)
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
struct exynos_iommu_domain *priv;
struct exynos_iommu_domain *exynos_domain;
int i;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
if (!priv->pgtable)
exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
if (!exynos_domain)
return NULL;
exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
if (!exynos_domain->pgtable)
goto err_pgtable;
priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (!priv->lv2entcnt)
exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (!exynos_domain->lv2entcnt)
goto err_counter;
/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
priv->pgtable[i + 0] = ZERO_LV2LINK;
priv->pgtable[i + 1] = ZERO_LV2LINK;
priv->pgtable[i + 2] = ZERO_LV2LINK;
priv->pgtable[i + 3] = ZERO_LV2LINK;
priv->pgtable[i + 4] = ZERO_LV2LINK;
priv->pgtable[i + 5] = ZERO_LV2LINK;
priv->pgtable[i + 6] = ZERO_LV2LINK;
priv->pgtable[i + 7] = ZERO_LV2LINK;
exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
}
pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES);
spin_lock_init(&priv->lock);
spin_lock_init(&priv->pgtablelock);
INIT_LIST_HEAD(&priv->clients);
spin_lock_init(&exynos_domain->lock);
spin_lock_init(&exynos_domain->pgtablelock);
INIT_LIST_HEAD(&exynos_domain->clients);
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end = ~0UL;
domain->geometry.force_aperture = true;
exynos_domain->domain.geometry.aperture_start = 0;
exynos_domain->domain.geometry.aperture_end = ~0UL;
exynos_domain->domain.geometry.force_aperture = true;
domain->priv = priv;
return 0;
return &exynos_domain->domain;
err_counter:
free_pages((unsigned long)priv->pgtable, 2);
free_pages((unsigned long)exynos_domain->pgtable, 2);
err_pgtable:
kfree(priv);
return -ENOMEM;
kfree(exynos_domain);
return NULL;
}
static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
static void exynos_iommu_domain_free(struct iommu_domain *domain)
{
struct exynos_iommu_domain *priv = domain->priv;
struct exynos_iommu_domain *priv = to_exynos_domain(domain);
struct exynos_iommu_owner *owner;
unsigned long flags;
int i;
@ -773,15 +781,14 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
free_pages((unsigned long)priv->pgtable, 2);
free_pages((unsigned long)priv->lv2entcnt, 1);
kfree(domain->priv);
domain->priv = NULL;
kfree(priv);
}
static int exynos_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct exynos_iommu_domain *priv = domain->priv;
struct exynos_iommu_domain *priv = to_exynos_domain(domain);
phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
int ret;
@ -812,7 +819,7 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct exynos_iommu_owner *owner;
struct exynos_iommu_domain *priv = domain->priv;
struct exynos_iommu_domain *priv = to_exynos_domain(domain);
phys_addr_t pagetable = virt_to_phys(priv->pgtable);
unsigned long flags;
@ -988,7 +995,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
phys_addr_t paddr, size_t size, int prot)
{
struct exynos_iommu_domain *priv = domain->priv;
struct exynos_iommu_domain *priv = to_exynos_domain(domain);
sysmmu_pte_t *entry;
sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
unsigned long flags;
@ -1042,7 +1049,7 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
unsigned long l_iova, size_t size)
{
struct exynos_iommu_domain *priv = domain->priv;
struct exynos_iommu_domain *priv = to_exynos_domain(domain);
sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
sysmmu_pte_t *ent;
size_t err_pgsize;
@ -1119,7 +1126,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct exynos_iommu_domain *priv = domain->priv;
struct exynos_iommu_domain *priv = to_exynos_domain(domain);
sysmmu_pte_t *entry;
unsigned long flags;
phys_addr_t phys = 0;
@ -1171,8 +1178,8 @@ static void exynos_iommu_remove_device(struct device *dev)
}
static const struct iommu_ops exynos_iommu_ops = {
.domain_init = exynos_iommu_domain_init,
.domain_destroy = exynos_iommu_domain_destroy,
.domain_alloc = exynos_iommu_domain_alloc,
.domain_free = exynos_iommu_domain_free,
.attach_dev = exynos_iommu_attach_device,
.detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,


@ -33,6 +33,11 @@ static struct kmem_cache *fsl_pamu_domain_cache;
static struct kmem_cache *iommu_devinfo_cache;
static DEFINE_SPINLOCK(device_domain_lock);
static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
{
return container_of(dom, struct fsl_dma_domain, iommu_domain);
}
static int __init iommu_init_mempool(void)
{
fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
@ -65,7 +70,7 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i
struct dma_window *win_ptr = &dma_domain->win_arr[0];
struct iommu_domain_geometry *geom;
geom = &dma_domain->iommu_domain->geometry;
geom = &dma_domain->iommu_domain.geometry;
if (!win_cnt || !dma_domain->geom_size) {
pr_debug("Number of windows/geometry not configured for the domain\n");
@ -123,7 +128,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
{
int ret;
struct dma_window *wnd = &dma_domain->win_arr[0];
phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
unsigned long flags;
spin_lock_irqsave(&iommu_lock, flags);
@ -172,7 +177,7 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
} else {
phys_addr_t wnd_addr;
wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
ret = pamu_config_ppaace(liodn, wnd_addr,
wnd->size,
@ -384,7 +389,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct fsl_dma_domain *dma_domain = domain->priv;
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
if (iova < domain->geometry.aperture_start ||
iova > domain->geometry.aperture_end)
@ -398,11 +403,9 @@ static bool fsl_pamu_capable(enum iommu_cap cap)
return cap == IOMMU_CAP_CACHE_COHERENCY;
}
static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
static void fsl_pamu_domain_free(struct iommu_domain *domain)
{
struct fsl_dma_domain *dma_domain = domain->priv;
domain->priv = NULL;
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
/* remove all the devices from the device list */
detach_device(NULL, dma_domain);
@ -413,23 +416,24 @@ static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
}
static int fsl_pamu_domain_init(struct iommu_domain *domain)
static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
{
struct fsl_dma_domain *dma_domain;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
dma_domain = iommu_alloc_dma_domain();
if (!dma_domain) {
pr_debug("dma_domain allocation failed\n");
return -ENOMEM;
return NULL;
}
domain->priv = dma_domain;
dma_domain->iommu_domain = domain;
/* default geometry 64 GB i.e. maximum system address */
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end = (1ULL << 36) - 1;
domain->geometry.force_aperture = true;
dma_domain->iommu_domain.geometry.aperture_start = 0;
dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
dma_domain->iommu_domain.geometry.force_aperture = true;
return 0;
return &dma_domain->iommu_domain;
}
/* Configure geometry settings for all LIODNs associated with domain */
@ -499,7 +503,7 @@ static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
struct fsl_dma_domain *dma_domain = domain->priv;
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
unsigned long flags;
int ret;
@ -530,7 +534,7 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
{
struct fsl_dma_domain *dma_domain = domain->priv;
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
struct dma_window *wnd;
int pamu_prot = 0;
int ret;
@ -607,7 +611,7 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
int num)
{
unsigned long flags;
struct iommu_domain *domain = dma_domain->iommu_domain;
struct iommu_domain *domain = &dma_domain->iommu_domain;
int ret = 0;
int i;
@ -653,7 +657,7 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
static int fsl_pamu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct fsl_dma_domain *dma_domain = domain->priv;
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
const u32 *liodn;
u32 liodn_cnt;
int len, ret = 0;
@ -691,7 +695,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
static void fsl_pamu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct fsl_dma_domain *dma_domain = domain->priv;
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
const u32 *prop;
int len;
struct pci_dev *pdev = NULL;
@ -723,7 +727,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
static int configure_domain_geometry(struct iommu_domain *domain, void *data)
{
struct iommu_domain_geometry *geom_attr = data;
struct fsl_dma_domain *dma_domain = domain->priv;
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
dma_addr_t geom_size;
unsigned long flags;
@ -813,7 +817,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
enum iommu_attr attr_type, void *data)
{
struct fsl_dma_domain *dma_domain = domain->priv;
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
int ret = 0;
switch (attr_type) {
@ -838,7 +842,7 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
enum iommu_attr attr_type, void *data)
{
struct fsl_dma_domain *dma_domain = domain->priv;
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
int ret = 0;
switch (attr_type) {
@ -999,7 +1003,7 @@ static void fsl_pamu_remove_device(struct device *dev)
static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
{
struct fsl_dma_domain *dma_domain = domain->priv;
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
unsigned long flags;
int ret;
@ -1048,15 +1052,15 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
{
struct fsl_dma_domain *dma_domain = domain->priv;
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
return dma_domain->win_cnt;
}
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_init = fsl_pamu_domain_init,
.domain_destroy = fsl_pamu_domain_destroy,
.domain_alloc = fsl_pamu_domain_alloc,
.domain_free = fsl_pamu_domain_free,
.attach_dev = fsl_pamu_attach_device,
.detach_dev = fsl_pamu_detach_device,
.domain_window_enable = fsl_pamu_window_enable,


@ -71,7 +71,7 @@ struct fsl_dma_domain {
u32 stash_id;
struct pamu_stash_attribute dma_stash;
u32 snoop_id;
struct iommu_domain *iommu_domain;
struct iommu_domain iommu_domain;
spinlock_t domain_lock;
};


@ -339,7 +339,7 @@ struct dmar_domain {
DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
/* bitmap of iommus this domain uses*/
struct list_head devices; /* all devices' list */
struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
@ -358,6 +358,9 @@ struct dmar_domain {
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
spinlock_t iommu_lock; /* protect iommu set in domain */
u64 max_addr; /* maximum mapped address */
struct iommu_domain domain; /* generic domain data structure for
iommu core */
};
/* PCI domain-device relationship */
@ -449,6 +452,12 @@ static LIST_HEAD(device_domain_list);
static const struct iommu_ops intel_iommu_ops;
/* Convert generic 'struct iommu_domain to private struct dmar_domain */
static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
return container_of(dom, struct dmar_domain, domain);
}
static int __init intel_iommu_setup(char *str)
{
if (!str)
@ -595,12 +604,13 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
int i, found = 0;
bool found = false;
int i;
domain->iommu_coherency = 1;
for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
found = 1;
found = true;
if (!ecap_coherent(g_iommus[i]->ecap)) {
domain->iommu_coherency = 0;
break;
@ -1267,7 +1277,7 @@ static struct device_domain_info *
iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
int found = 0;
bool found = false;
unsigned long flags;
struct device_domain_info *info;
struct pci_dev *pdev;
@ -1282,7 +1292,7 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
list_for_each_entry(info, &domain->devices, link)
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
found = 1;
found = true;
break;
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@ -4269,7 +4279,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
struct device_domain_info *info, *tmp;
struct intel_iommu *iommu;
unsigned long flags;
int found = 0;
bool found = false;
u8 bus, devfn;
iommu = device_to_iommu(dev, &bus, &devfn);
@ -4301,7 +4311,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
* update iommu count and coherency
*/
if (info->iommu == iommu)
found = 1;
found = true;
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@ -4339,44 +4349,45 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
return 0;
}
static int intel_iommu_domain_init(struct iommu_domain *domain)
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
struct iommu_domain *domain;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
if (!dmar_domain) {
printk(KERN_ERR
"intel_iommu_domain_init: dmar_domain == NULL\n");
return -ENOMEM;
return NULL;
}
if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
printk(KERN_ERR
"intel_iommu_domain_init() failed\n");
domain_exit(dmar_domain);
return -ENOMEM;
return NULL;
}
domain_update_iommu_cap(dmar_domain);
domain->priv = dmar_domain;
domain = &dmar_domain->domain;
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
domain->geometry.force_aperture = true;
return 0;
return domain;
}
static void intel_iommu_domain_destroy(struct iommu_domain *domain)
static void intel_iommu_domain_free(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = domain->priv;
domain->priv = NULL;
domain_exit(dmar_domain);
domain_exit(to_dmar_domain(domain));
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct dmar_domain *dmar_domain = domain->priv;
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu;
int addr_width;
u8 bus, devfn;
@ -4441,16 +4452,14 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
static void intel_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct dmar_domain *dmar_domain = domain->priv;
domain_remove_one_dev_info(dmar_domain, dev);
domain_remove_one_dev_info(to_dmar_domain(domain), dev);
}
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot)
{
struct dmar_domain *dmar_domain = domain->priv;
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr;
int prot = 0;
int ret;
@ -4487,7 +4496,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
static size_t intel_iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = domain->priv;
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct page *freelist = NULL;
struct intel_iommu *iommu;
unsigned long start_pfn, last_pfn;
@ -4535,7 +4544,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct dmar_domain *dmar_domain = domain->priv;
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct dma_pte *pte;
int level = 0;
u64 phys = 0;
@ -4594,8 +4603,8 @@ static void intel_iommu_remove_device(struct device *dev)
static const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_init = intel_iommu_domain_init,
.domain_destroy = intel_iommu_domain_destroy,
.domain_alloc = intel_iommu_domain_alloc,
.domain_free = intel_iommu_domain_free,
.attach_dev = intel_iommu_attach_device,
.detach_dev = intel_iommu_detach_device,
.map = intel_iommu_map,


@ -631,7 +631,7 @@ static int __init intel_enable_irq_remapping(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
int setup = 0;
bool setup = false;
int eim = 0;
if (x2apic_supported()) {
@ -697,7 +697,7 @@ static int __init intel_enable_irq_remapping(void)
*/
for_each_iommu(iommu, drhd) {
iommu_set_irq_remapping(iommu, eim);
setup = 1;
setup = true;
}
if (!setup)
@ -856,7 +856,7 @@ static int __init parse_ioapics_under_ir(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
int ir_supported = 0;
bool ir_supported = false;
int ioapic_idx;
for_each_iommu(iommu, drhd)
@ -864,7 +864,7 @@ static int __init parse_ioapics_under_ir(void)
if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
return -1;
ir_supported = 1;
ir_supported = true;
}
if (!ir_supported)
@ -917,7 +917,7 @@ static void disable_irq_remapping(void)
static int reenable_irq_remapping(int eim)
{
struct dmar_drhd_unit *drhd;
int setup = 0;
bool setup = false;
struct intel_iommu *iommu = NULL;
for_each_iommu(iommu, drhd)
@ -933,7 +933,7 @@ static int reenable_irq_remapping(int eim)
/* Set up interrupt remapping for iommu.*/
iommu_set_irq_remapping(iommu, eim);
setup = 1;
setup = true;
}
if (!setup)


@ -116,6 +116,8 @@
#define ARM_32_LPAE_TCR_EAE (1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31)
#define ARM_LPAE_TCR_EPD1 (1 << 23)
#define ARM_LPAE_TCR_TG0_4K (0 << 14)
#define ARM_LPAE_TCR_TG0_64K (1 << 14)
#define ARM_LPAE_TCR_TG0_16K (2 << 14)
@ -621,6 +623,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
}
reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
/* Disable speculative walks through TTBR1 */
reg |= ARM_LPAE_TCR_EPD1;
cfg->arm_lpae_s1_cfg.tcr = reg;
/* MAIRs */


@ -901,36 +901,24 @@ EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
struct iommu_domain *domain;
int ret;
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
if (!domain)
return NULL;
domain->ops = bus->iommu_ops;
ret = domain->ops->domain_init(domain);
if (ret)
goto out_free;
domain->ops = bus->iommu_ops;
domain->type = IOMMU_DOMAIN_UNMANAGED;
return domain;
out_free:
kfree(domain);
return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
if (likely(domain->ops->domain_destroy != NULL))
domain->ops->domain_destroy(domain);
kfree(domain);
domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
@ -1049,6 +1037,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
@ -1100,6 +1091,9 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
domain->ops->pgsize_bitmap == 0UL))
return -ENODEV;
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
/* find out the minimum page size supported */
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
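The core change above replaces the old domain_init/domain_destroy callbacks, which worked on a core-allocated struct iommu_domain and a driver-private domain->priv pointer, with domain_alloc/domain_free: each driver now embeds the generic domain inside its own domain structure and recovers it with container_of(). A minimal sketch of the driver-side pattern the rest of this series applies, using hypothetical foo_* names:

	struct foo_domain {
		spinlock_t lock;		/* driver-private state */
		struct iommu_domain domain;	/* embedded generic handle */
	};

	static struct foo_domain *to_foo_domain(struct iommu_domain *dom)
	{
		return container_of(dom, struct foo_domain, domain);
	}

	static struct iommu_domain *foo_domain_alloc(unsigned type)
	{
		struct foo_domain *priv;

		if (type != IOMMU_DOMAIN_UNMANAGED)
			return NULL;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return NULL;

		spin_lock_init(&priv->lock);
		return &priv->domain;	/* the core only ever sees this handle */
	}

	static void foo_domain_free(struct iommu_domain *dom)
	{
		kfree(to_foo_domain(dom));
	}

iommu_domain_alloc() fills in domain->ops and domain->type itself, and iommu_map()/iommu_unmap() now reject domains whose type is not paging-capable.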


@ -38,7 +38,7 @@ struct ipmmu_vmsa_device {
struct ipmmu_vmsa_domain {
struct ipmmu_vmsa_device *mmu;
struct iommu_domain *io_domain;
struct iommu_domain io_domain;
struct io_pgtable_cfg cfg;
struct io_pgtable_ops *iop;
@ -56,6 +56,11 @@ struct ipmmu_vmsa_archdata {
static DEFINE_SPINLOCK(ipmmu_devices_lock);
static LIST_HEAD(ipmmu_devices);
static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}
#define TLB_LOOP_TIMEOUT 100 /* 100us */
/* -----------------------------------------------------------------------------
@ -428,7 +433,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
* TODO: We need to look up the faulty device based on the I/O VA. Use
* the IOMMU device for now.
*/
if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0))
if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
return IRQ_HANDLED;
dev_err_ratelimited(mmu->dev,
@ -448,7 +453,7 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
return IRQ_NONE;
io_domain = mmu->mapping->domain;
domain = io_domain->priv;
domain = to_vmsa_domain(io_domain);
return ipmmu_domain_irq(domain);
}
@ -457,25 +462,25 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
* IOMMU Operations
*/
static int ipmmu_domain_init(struct iommu_domain *io_domain)
static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
struct ipmmu_vmsa_domain *domain;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return -ENOMEM;
return NULL;
spin_lock_init(&domain->lock);
io_domain->priv = domain;
domain->io_domain = io_domain;
return 0;
return &domain->io_domain;
}
static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
/*
* Free the domain resources. We assume that all devices have already
@ -491,7 +496,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
struct ipmmu_vmsa_device *mmu = archdata->mmu;
struct ipmmu_vmsa_domain *domain = io_domain->priv;
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
unsigned long flags;
unsigned int i;
int ret = 0;
@ -532,7 +537,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
struct device *dev)
{
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
struct ipmmu_vmsa_domain *domain = io_domain->priv;
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
unsigned int i;
for (i = 0; i < archdata->num_utlbs; ++i)
@ -546,7 +551,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
if (!domain)
return -ENODEV;
@ -557,7 +562,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
size_t size)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
return domain->iop->unmap(domain->iop, iova, size);
}
@ -565,7 +570,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
dma_addr_t iova)
{
struct ipmmu_vmsa_domain *domain = io_domain->priv;
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
/* TODO: Is locking needed ? */
@ -737,8 +742,8 @@ static void ipmmu_remove_device(struct device *dev)
}
static const struct iommu_ops ipmmu_ops = {
.domain_init = ipmmu_domain_init,
.domain_destroy = ipmmu_domain_destroy,
.domain_alloc = ipmmu_domain_alloc,
.domain_free = ipmmu_domain_free,
.attach_dev = ipmmu_attach_device,
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,


@ -52,8 +52,14 @@ DEFINE_SPINLOCK(msm_iommu_lock);
struct msm_priv {
unsigned long *pgtable;
struct list_head list_attached;
struct iommu_domain domain;
};
static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
return container_of(dom, struct msm_priv, domain);
}
static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
{
int ret;
@ -79,7 +85,7 @@ static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
static int __flush_iotlb(struct iommu_domain *domain)
{
struct msm_priv *priv = domain->priv;
struct msm_priv *priv = to_msm_priv(domain);
struct msm_iommu_drvdata *iommu_drvdata;
struct msm_iommu_ctx_drvdata *ctx_drvdata;
int ret = 0;
@ -209,10 +215,14 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
SET_M(base, ctx, 1);
}
static int msm_iommu_domain_init(struct iommu_domain *domain)
static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
{
struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
struct msm_priv *priv;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
goto fail_nomem;
@ -224,20 +234,19 @@ static int msm_iommu_domain_init(struct iommu_domain *domain)
goto fail_nomem;
memset(priv->pgtable, 0, SZ_16K);
domain->priv = priv;
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end = (1ULL << 32) - 1;
domain->geometry.force_aperture = true;
priv->domain.geometry.aperture_start = 0;
priv->domain.geometry.aperture_end = (1ULL << 32) - 1;
priv->domain.geometry.force_aperture = true;
return 0;
return &priv->domain;
fail_nomem:
kfree(priv);
return -ENOMEM;
return NULL;
}
static void msm_iommu_domain_destroy(struct iommu_domain *domain)
static void msm_iommu_domain_free(struct iommu_domain *domain)
{
struct msm_priv *priv;
unsigned long flags;
@ -245,20 +254,17 @@ static void msm_iommu_domain_destroy(struct iommu_domain *domain)
int i;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = domain->priv;
domain->priv = NULL;
priv = to_msm_priv(domain);
if (priv) {
fl_table = priv->pgtable;
fl_table = priv->pgtable;
for (i = 0; i < NUM_FL_PTE; i++)
if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
free_page((unsigned long) __va(((fl_table[i]) &
FL_BASE_MASK)));
for (i = 0; i < NUM_FL_PTE; i++)
if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
free_page((unsigned long) __va(((fl_table[i]) &
FL_BASE_MASK)));
free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
priv->pgtable = NULL;
}
free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
priv->pgtable = NULL;
kfree(priv);
spin_unlock_irqrestore(&msm_iommu_lock, flags);
@ -276,9 +282,9 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = domain->priv;
priv = to_msm_priv(domain);
if (!priv || !dev) {
if (!dev) {
ret = -EINVAL;
goto fail;
}
@ -330,9 +336,9 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
int ret;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = domain->priv;
priv = to_msm_priv(domain);
if (!priv || !dev)
if (!dev)
goto fail;
iommu_drvdata = dev_get_drvdata(dev->parent);
@ -382,11 +388,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
goto fail;
}
priv = domain->priv;
if (!priv) {
ret = -EINVAL;
goto fail;
}
priv = to_msm_priv(domain);
fl_table = priv->pgtable;
@ -484,10 +486,7 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = domain->priv;
if (!priv)
goto fail;
priv = to_msm_priv(domain);
fl_table = priv->pgtable;
@ -566,7 +565,7 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = domain->priv;
priv = to_msm_priv(domain);
if (list_empty(&priv->list_attached))
goto fail;
@ -674,8 +673,8 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
static const struct iommu_ops msm_iommu_ops = {
.capable = msm_iommu_capable,
.domain_init = msm_iommu_domain_init,
.domain_destroy = msm_iommu_domain_destroy,
.domain_alloc = msm_iommu_domain_alloc,
.domain_free = msm_iommu_domain_free,
.attach_dev = msm_iommu_attach_dev,
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,


@ -59,6 +59,7 @@ struct omap_iommu_domain {
struct omap_iommu *iommu_dev;
struct device *dev;
spinlock_t lock;
struct iommu_domain domain;
};
#define MMU_LOCK_BASE_SHIFT 10
@ -79,6 +80,15 @@ struct iotlb_lock {
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;
/**
* to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
* @dom: generic iommu domain handle
**/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
return container_of(dom, struct omap_iommu_domain, domain);
}
/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
* @dev: client device
@ -901,7 +911,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
u32 *iopgd, *iopte;
struct omap_iommu *obj = data;
struct iommu_domain *domain = obj->domain;
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
if (!omap_domain->iommu_dev)
return IRQ_NONE;
@ -1113,7 +1123,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
phys_addr_t pa, size_t bytes, int prot)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
struct iotlb_entry e;
@ -1140,7 +1150,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
size_t size)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
@ -1152,7 +1162,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu;
struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
int ret = 0;
@ -1212,17 +1222,20 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
static void omap_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
spin_lock(&omap_domain->lock);
_omap_iommu_detach_dev(omap_domain, dev);
spin_unlock(&omap_domain->lock);
}
static int omap_iommu_domain_init(struct iommu_domain *domain)
static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
struct omap_iommu_domain *omap_domain;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
if (!omap_domain) {
pr_err("kzalloc failed\n");
@ -1244,25 +1257,21 @@ static int omap_iommu_domain_init(struct iommu_domain *domain)
clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
spin_lock_init(&omap_domain->lock);
domain->priv = omap_domain;
omap_domain->domain.geometry.aperture_start = 0;
omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1;
omap_domain->domain.geometry.force_aperture = true;
domain->geometry.aperture_start = 0;
domain->geometry.aperture_end = (1ULL << 32) - 1;
domain->geometry.force_aperture = true;
return 0;
return &omap_domain->domain;
fail_nomem:
kfree(omap_domain);
out:
return -ENOMEM;
return NULL;
}
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
static void omap_iommu_domain_free(struct iommu_domain *domain)
{
struct omap_iommu_domain *omap_domain = domain->priv;
domain->priv = NULL;
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
/*
* An iommu device is still attached
@ -1278,7 +1287,7 @@ static void omap_iommu_domain_destroy(struct iommu_domain *domain)
static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t da)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
u32 *pgd, *pte;
@ -1358,8 +1367,8 @@ static void omap_iommu_remove_device(struct device *dev)
}
static const struct iommu_ops omap_iommu_ops = {
.domain_init = omap_iommu_domain_init,
.domain_destroy = omap_iommu_domain_destroy,
.domain_alloc = omap_iommu_domain_alloc,
.domain_free = omap_iommu_domain_free,
.attach_dev = omap_iommu_attach_dev,
.detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,


@ -80,6 +80,8 @@ struct rk_iommu_domain {
u32 *dt; /* page directory table */
spinlock_t iommus_lock; /* lock for iommus list */
spinlock_t dt_lock; /* lock for modifying page directory table */
struct iommu_domain domain;
};
struct rk_iommu {
@ -100,6 +102,11 @@ static inline void rk_table_flush(u32 *va, unsigned int count)
outer_flush_range(pa_start, pa_end);
}
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
return container_of(dom, struct rk_iommu_domain, domain);
}
/**
* Inspired by _wait_for in intel_drv.h
* This is NOT safe for use in interrupt context.
@ -503,7 +510,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct rk_iommu_domain *rk_domain = domain->priv;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
phys_addr_t pt_phys, phys = 0;
u32 dte, pte;
@ -639,7 +646,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
phys_addr_t paddr, size_t size, int prot)
{
struct rk_iommu_domain *rk_domain = domain->priv;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
dma_addr_t iova = (dma_addr_t)_iova;
u32 *page_table, *pte_addr;
@ -670,7 +677,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
size_t size)
{
struct rk_iommu_domain *rk_domain = domain->priv;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
dma_addr_t iova = (dma_addr_t)_iova;
phys_addr_t pt_phys;
@ -726,7 +733,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = domain->priv;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
int ret;
phys_addr_t dte_addr;
@ -778,7 +785,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = domain->priv;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags;
/* Allow 'virtual devices' (eg drm) to detach from domain */
@ -804,13 +811,16 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
dev_info(dev, "Detached from iommu domain\n");
}
static int rk_iommu_domain_init(struct iommu_domain *domain)
static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
struct rk_iommu_domain *rk_domain;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain)
return -ENOMEM;
return NULL;
/*
* rk32xx iommus use a 2 level pagetable.
@ -827,17 +837,16 @@ static int rk_iommu_domain_init(struct iommu_domain *domain)
spin_lock_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus);
domain->priv = rk_domain;
return &rk_domain->domain;
return 0;
err_dt:
kfree(rk_domain);
return -ENOMEM;
return NULL;
}
static void rk_iommu_domain_destroy(struct iommu_domain *domain)
static void rk_iommu_domain_free(struct iommu_domain *domain)
{
struct rk_iommu_domain *rk_domain = domain->priv;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
int i;
WARN_ON(!list_empty(&rk_domain->iommus));
@ -852,8 +861,7 @@ static void rk_iommu_domain_destroy(struct iommu_domain *domain)
}
free_page((unsigned long)rk_domain->dt);
kfree(domain->priv);
domain->priv = NULL;
kfree(rk_domain);
}
static bool rk_iommu_is_dev_iommu_master(struct device *dev)
@ -952,8 +960,8 @@ static void rk_iommu_remove_device(struct device *dev)
}
static const struct iommu_ops rk_iommu_ops = {
.domain_init = rk_iommu_domain_init,
.domain_destroy = rk_iommu_domain_destroy,
.domain_alloc = rk_iommu_domain_alloc,
.domain_free = rk_iommu_domain_free,
.attach_dev = rk_iommu_attach_device,
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,


@ -42,11 +42,17 @@ struct shmobile_iommu_domain {
spinlock_t map_lock;
spinlock_t attached_list_lock;
struct list_head attached_list;
struct iommu_domain domain;
};
static struct shmobile_iommu_archdata *ipmmu_archdata;
static struct kmem_cache *l1cache, *l2cache;
static struct shmobile_iommu_domain *to_sh_domain(struct iommu_domain *dom)
{
return container_of(dom, struct shmobile_iommu_domain, domain);
}
static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable,
struct kmem_cache *cache, size_t size)
{
@ -82,31 +88,33 @@ static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable,
sizeof(val) * count, DMA_TO_DEVICE);
}
static int shmobile_iommu_domain_init(struct iommu_domain *domain)
static struct iommu_domain *shmobile_iommu_domain_alloc(unsigned type)
{
struct shmobile_iommu_domain *sh_domain;
int i, ret;
sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL);
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
sh_domain = kzalloc(sizeof(*sh_domain), GFP_KERNEL);
if (!sh_domain)
return -ENOMEM;
return NULL;
ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
if (ret < 0) {
kfree(sh_domain);
return ret;
return NULL;
}
for (i = 0; i < L1_LEN; i++)
sh_domain->l2[i].pgtable = NULL;
spin_lock_init(&sh_domain->map_lock);
spin_lock_init(&sh_domain->attached_list_lock);
INIT_LIST_HEAD(&sh_domain->attached_list);
domain->priv = sh_domain;
return 0;
return &sh_domain->domain;
}
static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
static void shmobile_iommu_domain_free(struct iommu_domain *domain)
{
struct shmobile_iommu_domain *sh_domain = domain->priv;
struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
int i;
for (i = 0; i < L1_LEN; i++) {
@ -115,14 +123,13 @@ static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
}
pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
kfree(sh_domain);
domain->priv = NULL;
}
static int shmobile_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
struct shmobile_iommu_domain *sh_domain = domain->priv;
struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
int ret = -EBUSY;
if (!archdata)
@ -151,7 +158,7 @@ static void shmobile_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
struct shmobile_iommu_domain *sh_domain = domain->priv;
struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
if (!archdata)
return;
@ -214,7 +221,7 @@ static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
struct shmobile_iommu_domain *sh_domain = domain->priv;
struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
unsigned int l1index, l2index;
int ret;
@ -258,7 +265,7 @@ static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
struct shmobile_iommu_domain *sh_domain = domain->priv;
struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
unsigned int l1index, l2index;
uint32_t l2entry = 0;
size_t ret = 0;
@ -298,7 +305,7 @@ static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct shmobile_iommu_domain *sh_domain = domain->priv;
struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
uint32_t l1entry = 0, l2entry = 0;
unsigned int l1index, l2index;
@ -355,8 +362,8 @@ static int shmobile_iommu_add_device(struct device *dev)
}
static const struct iommu_ops shmobile_iommu_ops = {
.domain_init = shmobile_iommu_domain_init,
.domain_destroy = shmobile_iommu_domain_destroy,
.domain_alloc = shmobile_iommu_domain_alloc,
.domain_free = shmobile_iommu_domain_free,
.attach_dev = shmobile_iommu_attach_device,
.detach_dev = shmobile_iommu_detach_device,
.map = shmobile_iommu_map,


@ -63,11 +63,21 @@ struct gart_device {
struct device *dev;
};
struct gart_domain {
struct iommu_domain domain; /* generic domain handle */
struct gart_device *gart; /* link to gart device */
};
static struct gart_device *gart_handle; /* unique for a system */
#define GART_PTE(_pfn) \
(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
return container_of(dom, struct gart_domain, domain);
}
/*
* Any interaction between any block on PPSB and a block on APB or AHB
* must have these read-back to ensure the APB/AHB bus transaction is
@ -156,20 +166,11 @@ static inline bool gart_iova_range_valid(struct gart_device *gart,
static int gart_iommu_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct gart_device *gart;
struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
struct gart_client *client, *c;
int err = 0;
gart = gart_handle;
if (!gart)
return -EINVAL;
domain->priv = gart;
domain->geometry.aperture_start = gart->iovmm_base;
domain->geometry.aperture_end = gart->iovmm_base +
gart->page_count * GART_PAGE_SIZE - 1;
domain->geometry.force_aperture = true;
client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
if (!client)
return -ENOMEM;
@ -198,7 +199,8 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
static void gart_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct gart_device *gart = domain->priv;
struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
struct gart_client *c;
spin_lock(&gart->client_lock);
@ -216,33 +218,55 @@ static void gart_iommu_detach_dev(struct iommu_domain *domain,
spin_unlock(&gart->client_lock);
}
static int gart_iommu_domain_init(struct iommu_domain *domain)
static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{
return 0;
struct gart_domain *gart_domain;
struct gart_device *gart;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
gart = gart_handle;
if (!gart)
return NULL;
gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
if (!gart_domain)
return NULL;
gart_domain->gart = gart;
gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
gart->page_count * GART_PAGE_SIZE - 1;
gart_domain->domain.geometry.force_aperture = true;
return &gart_domain->domain;
}
static void gart_iommu_domain_destroy(struct iommu_domain *domain)
static void gart_iommu_domain_free(struct iommu_domain *domain)
{
struct gart_device *gart = domain->priv;
struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
if (!gart)
return;
if (gart) {
spin_lock(&gart->client_lock);
if (!list_empty(&gart->client)) {
struct gart_client *c;
spin_lock(&gart->client_lock);
if (!list_empty(&gart->client)) {
struct gart_client *c;
list_for_each_entry(c, &gart->client, list)
gart_iommu_detach_dev(domain, c->dev);
list_for_each_entry(c, &gart->client, list)
gart_iommu_detach_dev(domain, c->dev);
}
spin_unlock(&gart->client_lock);
}
spin_unlock(&gart->client_lock);
domain->priv = NULL;
kfree(gart_domain);
}
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t bytes, int prot)
{
struct gart_device *gart = domain->priv;
struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
unsigned long flags;
unsigned long pfn;
@ -265,7 +289,8 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t bytes)
{
struct gart_device *gart = domain->priv;
struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
unsigned long flags;
if (!gart_iova_range_valid(gart, iova, bytes))
@ -281,7 +306,8 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct gart_device *gart = domain->priv;
struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
unsigned long pte;
phys_addr_t pa;
unsigned long flags;
@ -310,8 +336,8 @@ static bool gart_iommu_capable(enum iommu_cap cap)
static const struct iommu_ops gart_iommu_ops = {
.capable = gart_iommu_capable,
.domain_init = gart_iommu_domain_init,
.domain_destroy = gart_iommu_domain_destroy,
.domain_alloc = gart_iommu_domain_alloc,
.domain_free = gart_iommu_domain_free,
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,


@ -6,6 +6,7 @@
* published by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
@ -24,6 +25,8 @@ struct tegra_smmu {
struct tegra_mc *mc;
const struct tegra_smmu_soc *soc;
unsigned long pfn_mask;
unsigned long *asids;
struct mutex lock;
@ -31,7 +34,7 @@ struct tegra_smmu {
};
struct tegra_smmu_as {
struct iommu_domain *domain;
struct iommu_domain domain;
struct tegra_smmu *smmu;
unsigned int use_count;
struct page *count;
@ -40,6 +43,11 @@ struct tegra_smmu_as {
u32 attr;
};
static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
return container_of(dom, struct tegra_smmu_as, domain);
}
static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
unsigned long offset)
{
@ -105,8 +113,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12
#define SMMU_PFN_MASK 0x000fffff
#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)
@ -224,30 +230,32 @@ static bool tegra_smmu_capable(enum iommu_cap cap)
return false;
}
static int tegra_smmu_domain_init(struct iommu_domain *domain)
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
struct tegra_smmu_as *as;
unsigned int i;
uint32_t *pd;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
as = kzalloc(sizeof(*as), GFP_KERNEL);
if (!as)
return -ENOMEM;
return NULL;
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
as->domain = domain;
as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
if (!as->pd) {
kfree(as);
return -ENOMEM;
return NULL;
}
as->count = alloc_page(GFP_KERNEL);
if (!as->count) {
__free_page(as->pd);
kfree(as);
return -ENOMEM;
return NULL;
}
/* clear PDEs */
@ -264,14 +272,17 @@ static int tegra_smmu_domain_init(struct iommu_domain *domain)
for (i = 0; i < SMMU_NUM_PDE; i++)
pd[i] = 0;
domain->priv = as;
/* setup aperture */
as->domain.geometry.aperture_start = 0;
as->domain.geometry.aperture_end = 0xffffffff;
as->domain.geometry.force_aperture = true;
return 0;
return &as->domain;
}
static void tegra_smmu_domain_destroy(struct iommu_domain *domain)
static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
struct tegra_smmu_as *as = domain->priv;
struct tegra_smmu_as *as = to_smmu_as(domain);
/* TODO: free page directory and page tables */
ClearPageReserved(as->pd);
@ -395,7 +406,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct tegra_smmu *smmu = dev->archdata.iommu;
struct tegra_smmu_as *as = domain->priv;
struct tegra_smmu_as *as = to_smmu_as(domain);
struct device_node *np = dev->of_node;
struct of_phandle_args args;
unsigned int index = 0;
@ -428,7 +439,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain,
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
struct tegra_smmu_as *as = domain->priv;
struct tegra_smmu_as *as = to_smmu_as(domain);
struct device_node *np = dev->of_node;
struct tegra_smmu *smmu = as->smmu;
struct of_phandle_args args;
@ -481,7 +492,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
smmu_flush_tlb_section(smmu, as->id, iova);
smmu_flush(smmu);
} else {
page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
page = pfn_to_page(pd[pde] & smmu->pfn_mask);
pt = page_address(page);
}
@ -503,7 +514,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
u32 *pd = page_address(as->pd), *pt;
struct page *page;
page = pfn_to_page(pd[pde] & SMMU_PFN_MASK);
page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
pt = page_address(page);
/*
@ -524,7 +535,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
struct tegra_smmu_as *as = domain->priv;
struct tegra_smmu_as *as = to_smmu_as(domain);
struct tegra_smmu *smmu = as->smmu;
unsigned long offset;
struct page *page;
@ -548,7 +559,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size)
{
struct tegra_smmu_as *as = domain->priv;
struct tegra_smmu_as *as = to_smmu_as(domain);
struct tegra_smmu *smmu = as->smmu;
unsigned long offset;
struct page *page;
@ -572,13 +583,13 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
struct tegra_smmu_as *as = domain->priv;
struct tegra_smmu_as *as = to_smmu_as(domain);
struct page *page;
unsigned long pfn;
u32 *pte;
pte = as_get_pte(as, iova, &page);
pfn = *pte & SMMU_PFN_MASK;
pfn = *pte & as->smmu->pfn_mask;
return PFN_PHYS(pfn);
}
@ -633,8 +644,8 @@ static void tegra_smmu_remove_device(struct device *dev)
static const struct iommu_ops tegra_smmu_ops = {
.capable = tegra_smmu_capable,
.domain_init = tegra_smmu_domain_init,
.domain_destroy = tegra_smmu_domain_destroy,
.domain_alloc = tegra_smmu_domain_alloc,
.domain_free = tegra_smmu_domain_free,
.attach_dev = tegra_smmu_attach_dev,
.detach_dev = tegra_smmu_detach_dev,
.add_device = tegra_smmu_add_device,
@ -702,6 +713,10 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->dev = dev;
smmu->mc = mc;
smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
mc->soc->num_address_bits, smmu->pfn_mask);
value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
if (soc->supports_request_limit)
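As a quick check of the pfn_mask arithmetic introduced in tegra_smmu_probe() above: with 4 KiB pages (PAGE_SHIFT = 12) and a memory controller reporting 32 address bits, BIT_MASK(32 - 12) - 1 = (1UL << 20) - 1 = 0x000fffff, which reproduces the hard-coded SMMU_PFN_MASK that the earlier hunks removed; a SoC reporting, say, 34 address bits would instead get (1UL << 22) - 1 = 0x3fffff, letting the page-directory and page-table entries address the wider physical range.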


@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
dm_get(md);
atomic_inc(&md->open_count);
out:
spin_unlock(&_minor_lock);
@ -442,16 +441,20 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
struct mapped_device *md = disk->private_data;
struct mapped_device *md;
spin_lock(&_minor_lock);
md = disk->private_data;
if (WARN_ON(!md))
goto out;
if (atomic_dec_and_test(&md->open_count) &&
(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
queue_work(deferred_remove_workqueue, &deferred_remove_work);
dm_put(md);
out:
spin_unlock(&_minor_lock);
}
@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md)
int minor = MINOR(disk_devt(md->disk));
unlock_fs(md);
bdput(md->bdev);
destroy_workqueue(md->wq);
if (md->kworker_task)
@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md)
mempool_destroy(md->rq_pool);
if (md->bs)
bioset_free(md->bs);
blk_integrity_unregister(md->disk);
del_gendisk(md->disk);
cleanup_srcu_struct(&md->io_barrier);
free_table_devices(&md->table_devices);
free_minor(minor);
dm_stats_cleanup(&md->stats);
spin_lock(&_minor_lock);
md->disk->private_data = NULL;
spin_unlock(&_minor_lock);
if (blk_get_integrity(md->disk))
blk_integrity_unregister(md->disk);
del_gendisk(md->disk);
put_disk(md->disk);
blk_cleanup_queue(md->queue);
dm_stats_cleanup(&md->stats);
bdput(md->bdev);
free_minor(minor);
module_put(THIS_MODULE);
kfree(md);
}
@ -2642,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
might_sleep();
spin_lock(&_minor_lock);
map = dm_get_live_table(md, &srcu_idx);
spin_lock(&_minor_lock);
idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);


@ -739,7 +739,7 @@ static int __init kempld_init(void)
for (id = kempld_dmi_table;
id->matches[0].slot != DMI_NONE; id++)
if (strstr(id->ident, force_device_id))
if (id->callback && id->callback(id))
if (id->callback && !id->callback(id))
break;
if (id->matches[0].slot == DMI_NONE)
return -ENODEV;


@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
{
u16 value;
u8 *buf;
int ret;
if (!data)
return -EINVAL;
*data = 0;
buf = kzalloc(sizeof(u8), GFP_KERNEL);
if (!buf)
return -ENOMEM;
addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
value = swab16(addr);
return usb_control_msg(ucr->pusb_dev,
ret = usb_control_msg(ucr->pusb_dev,
usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, 0, data, 1, 100);
value, 0, buf, 1, 100);
*data = *buf;
kfree(buf);
return ret;
}
EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
{
int ret;
u16 *buf;
if (!status)
return -EINVAL;
if (polling_pipe == 0)
if (polling_pipe == 0) {
buf = kzalloc(sizeof(u16), GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = usb_control_msg(ucr->pusb_dev,
usb_rcvctrlpipe(ucr->pusb_dev, 0),
RTSX_USB_REQ_POLL,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, 0, status, 2, 100);
else
0, 0, buf, 2, 100);
*status = *buf;
kfree(buf);
} else {
ret = rtsx_usb_get_status_with_bulk(ucr, status);
}
/* usb_control_msg may return positive when success */
if (ret < 0)
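Both register-read helpers above stop passing the caller's pointer straight to usb_control_msg() and use a small kzalloc'd bounce buffer instead: buffers handed to the USB core for control transfers must be DMA-capable heap memory, and callers may well pass stack or otherwise unsuitable pointers. A minimal sketch of the pattern, with hypothetical names and an illustrative vendor request:

	static int read_one_byte(struct usb_device *udev, u16 value, u8 *out)
	{
		u8 *buf;
		int ret;

		buf = kmalloc(1, GFP_KERNEL);	/* DMA-able heap buffer */
		if (!buf)
			return -ENOMEM;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01,
				      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				      value, 0, buf, 1, 100);
		if (ret >= 0)
			*out = *buf;		/* copy back to the caller */

		kfree(buf);
		return ret < 0 ? ret : 0;
	}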


@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
{
struct pcnet32_private *lp;
int i, media;
int fdx, mii, fset, dxsuflo;
int fdx, mii, fset, dxsuflo, sram;
int chip_version;
char *chipname;
struct net_device *dev;
@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
}
/* initialize variables */
fdx = mii = fset = dxsuflo = 0;
fdx = mii = fset = dxsuflo = sram = 0;
chip_version = (chip_version >> 12) & 0xffff;
switch (chip_version) {
@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
chipname = "PCnet/FAST III 79C973"; /* PCI */
fdx = 1;
mii = 1;
sram = 1;
break;
case 0x2626:
chipname = "PCnet/Home 79C978"; /* PCI */
@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
chipname = "PCnet/FAST III 79C975"; /* PCI */
fdx = 1;
mii = 1;
sram = 1;
break;
case 0x2628:
chipname = "PCnet/PRO 79C976";
@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dxsuflo = 1;
}
/*
* The Am79C973/Am79C975 controllers come with 12K of SRAM
* which we can use for the Tx/Rx buffers but most importantly,
* the use of SRAM allows us to use the BCR18:NOUFLO bit to avoid
* Tx FIFO underflows.
*/
if (sram) {
/*
* The SRAM is being configured in two steps. First we
* set the SRAM size in the BCR25:SRAM_SIZE bits. According
* to the datasheet, each bit corresponds to a 512-byte
* page so we can have at most 24 pages. The SRAM_SIZE
* holds the value of the upper 8 bits of the 16-bit SRAM size.
* The low 8-bits start at 0x00 and end at 0xff. So the
* address range is from 0x0000 up to 0x17ff. Therefore,
* the SRAM_SIZE is set to 0x17. The next step is to set
* the BCR26:SRAM_BND midway through so the Tx and Rx
* buffers can share the SRAM equally.
*/
a->write_bcr(ioaddr, 25, 0x17);
a->write_bcr(ioaddr, 26, 0xc);
/* And finally enable the NOUFLO bit */
a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
}
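A cross-check of the values used above: an SRAM_SIZE of 0x17 makes the last valid SRAM address 0x17ff, i.e. a 0x1800-byte window, and writing 0xc to BCR26 (presumably the same upper-8-bits encoding) corresponds to a boundary at 0x0c00, exactly half of that window, so the Tx and Rx buffers get equal shares; the final read-modify-write of BCR18 sets bit 11, the NOUFLO bit the comment refers to.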
dev = alloc_etherdev(sizeof(*lp));
if (!dev) {
ret = -ENOMEM;


@ -354,6 +354,7 @@ struct be_vf_cfg {
u16 vlan_tag;
u32 tx_rate;
u32 plink_tracking;
u32 privileges;
};
enum vf_state {
@ -423,6 +424,7 @@ struct be_adapter {
u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
u8 __iomem *db; /* Door Bell */
u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;


@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
{
int num_eqs, i = 0;
if (lancer_chip(adapter) && num > 8) {
while (num) {
num_eqs = min(num, 8);
__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
i += num_eqs;
num -= num_eqs;
}
} else {
__be_cmd_modify_eqd(adapter, set_eqd, num);
while (num) {
num_eqs = min(num, 8);
__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
i += num_eqs;
num -= num_eqs;
}
return 0;
@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
u32 num)
u32 num, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_vlan_config *req;
@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
wrb, NULL);
req->hdr.domain = domain;
req->interface_id = if_id;
req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;


@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
int be_cmd_get_fw_ver(struct be_adapter *adapter);
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
u32 num);
u32 num, u32 domain);
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);


@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter)
for_each_set_bit(i, adapter->vids, VLAN_N_VID)
vids[num++] = cpu_to_le16(i);
status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
if (status) {
dev_err(dev, "Setting HW VLAN filtering failed\n");
/* Set to VLAN promisc mode as setting VLAN filter failed */
@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
u16 vids[BE_NUM_VLANS_SUPPORTED];
int vf_if_id = vf_cfg->if_handle;
int status;
/* Enable Transparent VLAN Tagging */
status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
if (status)
return status;
/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
vids[0] = 0;
status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
if (!status)
dev_info(&adapter->pdev->dev,
"Cleared guest VLANs on VF%d", vf);
/* After TVT is enabled, disallow VFs to program VLAN filters */
if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
~BE_PRIV_FILTMGMT, vf + 1);
if (!status)
vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
}
return 0;
}
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
struct device *dev = &adapter->pdev->dev;
int status;
/* Reset Transparent VLAN Tagging. */
status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
vf_cfg->if_handle, 0);
if (status)
return status;
/* Allow VFs to program VLAN filtering */
if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
BE_PRIV_FILTMGMT, vf + 1);
if (!status) {
vf_cfg->privileges |= BE_PRIV_FILTMGMT;
dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
}
}
dev_info(dev,
"Disable/re-enable i/f in VM to clear Transparent VLAN tag");
return 0;
}
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status = 0;
int status;
if (!sriov_enabled(adapter))
return -EPERM;
@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
if (vf_cfg->vlan_tag != vlan)
status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
vf_cfg->if_handle, 0);
status = be_set_vf_tvt(adapter, vf, vlan);
} else {
/* Reset Transparent Vlan Tagging. */
status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
vf + 1, vf_cfg->if_handle, 0);
status = be_clear_vf_tvt(adapter, vf);
}
if (status) {
dev_err(&adapter->pdev->dev,
"VLAN %d config on VF %d failed : %#x\n", vlan,
vf, status);
"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
status);
return be_cmd_status(status);
}
vf_cfg->vlan_tag = vlan;
return 0;
}
@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter)
}
}
} else {
pci_read_config_dword(adapter->pdev,
PCICFG_UE_STATUS_LOW, &ue_lo);
pci_read_config_dword(adapter->pdev,
PCICFG_UE_STATUS_HIGH, &ue_hi);
pci_read_config_dword(adapter->pdev,
PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
pci_read_config_dword(adapter->pdev,
PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
ue_lo_mask = ioread32(adapter->pcicfg +
PCICFG_UE_STATUS_LOW_MASK);
ue_hi_mask = ioread32(adapter->pcicfg +
PCICFG_UE_STATUS_HI_MASK);
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
u32 cap_flags, u32 vf)
{
u32 en_flags;
int status;
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
en_flags &= cap_flags;
status = be_cmd_if_create(adapter, cap_flags, en_flags,
if_handle, vf);
return status;
return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
}
static int be_vfs_if_create(struct be_adapter *adapter)
@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
if (!BE3_chip(adapter)) {
status = be_cmd_get_profile_config(adapter, &res,
vf + 1);
if (!status)
if (!status) {
cap_flags = res.if_cap_flags;
/* Prevent VFs from enabling VLAN promiscuous
* mode
*/
cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
}
}
status = be_if_create(adapter, &vf_cfg->if_handle,
@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
struct be_vf_cfg *vf_cfg;
int status, old_vfs, vf;
u32 privileges;
old_vfs = pci_num_vf(adapter->pdev);
@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter)
for_all_vfs(adapter, vf_cfg, vf) {
/* Allow VFs to program MAC/VLAN filters */
status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
vf + 1);
if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
status = be_cmd_set_fn_privileges(adapter,
privileges |
vf_cfg->privileges |
BE_PRIV_FILTMGMT,
vf + 1);
if (!status)
if (!status) {
vf_cfg->privileges |= BE_PRIV_FILTMGMT;
dev_info(dev, "VF%d has FILTMGMT privilege\n",
vf);
}
}
/* Allow full available bandwidth */
@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
static int be_map_pci_bars(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
u8 __iomem *addr;
if (BEx_chip(adapter) && be_physfn(adapter)) {
adapter->csr = pci_iomap(adapter->pdev, 2, 0);
adapter->csr = pci_iomap(pdev, 2, 0);
if (!adapter->csr)
return -ENOMEM;
}
addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
addr = pci_iomap(pdev, db_bar(adapter), 0);
if (!addr)
goto pci_map_err;
adapter->db = addr;
if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
if (be_physfn(adapter)) {
/* PCICFG is the 2nd BAR in BE2 */
addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
if (!addr)
goto pci_map_err;
adapter->pcicfg = addr;
} else {
adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
}
}
be_roce_map_pci_bars(adapter);
return 0;
pci_map_err:
dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
be_unmap_pci_bars(adapter);
return -ENOMEM;
}


@ -46,8 +46,7 @@ enum cx82310_status {
};
#define CMD_PACKET_SIZE 64
/* first command after power on can take around 8 seconds */
#define CMD_TIMEOUT 15000
#define CMD_TIMEOUT 100
#define CMD_REPLY_RETRY 5
#define CX82310_MTU 1514
@ -78,8 +77,9 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, CMD_EP), buf,
CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
if (ret < 0) {
dev_err(&dev->udev->dev, "send command %#x: error %d\n",
cmd, ret);
if (cmd != CMD_GET_LINK_STATUS)
dev_err(&dev->udev->dev, "send command %#x: error %d\n",
cmd, ret);
goto end;
}
@ -90,8 +90,10 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
buf, CMD_PACKET_SIZE, &actual_len,
CMD_TIMEOUT);
if (ret < 0) {
dev_err(&dev->udev->dev,
"reply receive error %d\n", ret);
if (cmd != CMD_GET_LINK_STATUS)
dev_err(&dev->udev->dev,
"reply receive error %d\n",
ret);
goto end;
}
if (actual_len > 0)
@ -134,6 +136,8 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
int ret;
char buf[15];
struct usb_device *udev = dev->udev;
u8 link[3];
int timeout = 50;
/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@ -160,6 +164,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
if (!dev->partial_data)
return -ENOMEM;
/* wait for firmware to become ready (indicated by the link being up) */
while (--timeout) {
ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
link, sizeof(link));
/* the command can time out during boot - it's not an error */
if (!ret && link[0] == 1 && link[2] == 1)
break;
msleep(500);
};
if (!timeout) {
dev_err(&udev->dev, "firmware not ready in time\n");
return -ETIMEDOUT;
}
/* enable ethernet mode (?) */
ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
if (ret) {
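The explicit readiness poll added above replaces the old blanket 15-second per-command timeout (the removed comment noted that the first command after power-on can take around 8 seconds): CMD_TIMEOUT drops to 100 ms, and readiness is instead detected by polling CMD_GET_LINK_STATUS up to 50 times with a 500 ms sleep between attempts, i.e. roughly 25 seconds worst case before bind fails with -ETIMEDOUT.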


@ -1572,6 +1572,10 @@ static int palmas_regulators_probe(struct platform_device *pdev)
if (!pmic)
return -ENOMEM;
if (of_device_is_compatible(node, "ti,tps659038-pmic"))
palmas_generic_regs_info[PALMAS_REG_REGEN2].ctrl_addr =
TPS659038_REGEN2_CTRL;
pmic->dev = &pdev->dev;
pmic->palmas = palmas;
palmas->pmic = pmic;


@ -413,8 +413,8 @@ static void rtc_mrst_do_remove(struct device *dev)
mrst->dev = NULL;
}
#ifdef CONFIG_PM
static int mrst_suspend(struct device *dev, pm_message_t mesg)
#ifdef CONFIG_PM_SLEEP
static int mrst_suspend(struct device *dev)
{
struct mrst_rtc *mrst = dev_get_drvdata(dev);
unsigned char tmp;
@ -453,7 +453,7 @@ static int mrst_suspend(struct device *dev, pm_message_t mesg)
*/
static inline int mrst_poweroff(struct device *dev)
{
return mrst_suspend(dev, PMSG_HIBERNATE);
return mrst_suspend(dev);
}
static int mrst_resume(struct device *dev)
@ -490,9 +490,11 @@ static int mrst_resume(struct device *dev)
return 0;
}
static SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume);
#define MRST_PM_OPS (&mrst_pm_ops)
#else
#define mrst_suspend NULL
#define mrst_resume NULL
#define MRST_PM_OPS NULL
static inline int mrst_poweroff(struct device *dev)
{
@ -529,9 +531,8 @@ static struct platform_driver vrtc_mrst_platform_driver = {
.remove = vrtc_mrst_platform_remove,
.shutdown = vrtc_mrst_platform_shutdown,
.driver = {
.name = (char *) driver_name,
.suspend = mrst_suspend,
.resume = mrst_resume,
.name = driver_name,
.pm = MRST_PM_OPS,
}
};
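SIMPLE_DEV_PM_OPS(mrst_pm_ops, mrst_suspend, mrst_resume) builds the dev_pm_ops structure that MRST_PM_OPS points at, which is why the legacy .suspend/.resume driver hooks and the pm_message_t argument can be dropped. Roughly what the macro provides when CONFIG_PM_SLEEP is enabled (a sketch, not the exact kernel expansion):

	static const struct dev_pm_ops mrst_pm_ops = {
		.suspend  = mrst_suspend,	/* system suspend */
		.resume   = mrst_resume,
		.freeze   = mrst_suspend,	/* hibernation image creation */
		.thaw     = mrst_resume,
		.poweroff = mrst_suspend,	/* power down for hibernation */
		.restore  = mrst_resume,	/* resume from hibernation image */
	};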


@ -6815,7 +6815,8 @@ static struct ata_port_operations ipr_sata_ops = {
};
static struct ata_port_info sata_port_info = {
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_SAS_HOST,
.pio_mask = ATA_PIO4_ONLY,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,


@ -547,7 +547,8 @@ static struct ata_port_operations sas_sata_ops = {
};
static struct ata_port_info sata_port_info = {
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ,
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
ATA_FLAG_SAS_HOST,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,


@ -108,7 +108,8 @@ static void dw_spi_dma_tx_done(void *arg)
{
struct dw_spi *dws = arg;
if (test_and_clear_bit(TX_BUSY, &dws->dma_chan_busy) & BIT(RX_BUSY))
clear_bit(TX_BUSY, &dws->dma_chan_busy);
if (test_bit(RX_BUSY, &dws->dma_chan_busy))
return;
dw_spi_xfer_done(dws);
}
@ -156,7 +157,8 @@ static void dw_spi_dma_rx_done(void *arg)
{
struct dw_spi *dws = arg;
if (test_and_clear_bit(RX_BUSY, &dws->dma_chan_busy) & BIT(TX_BUSY))
clear_bit(RX_BUSY, &dws->dma_chan_busy);
if (test_bit(TX_BUSY, &dws->dma_chan_busy))
return;
dw_spi_xfer_done(dws);
}
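The reason for this rewrite: test_and_clear_bit() returns only the previous value of the single bit it clears, 0 or 1, so masking that return value with BIT() of the other channel's flag never actually tested whether the other direction was still busy. Each completion callback now clears its own flag and calls dw_spi_xfer_done() only when the opposite flag is clear as well, so the transfer is finished exactly once, after both the Tx and Rx DMA channels have completed.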


@ -498,7 +498,7 @@ static int spi_qup_probe(struct platform_device *pdev)
struct resource *res;
struct device *dev;
void __iomem *base;
u32 max_freq, iomode;
u32 max_freq, iomode, num_cs;
int ret, irq, size;
dev = &pdev->dev;
@ -550,10 +550,11 @@ static int spi_qup_probe(struct platform_device *pdev)
}
/* use num-cs unless not present or out of range */
if (of_property_read_u16(dev->of_node, "num-cs",
&master->num_chipselect) ||
(master->num_chipselect > SPI_NUM_CHIPSELECTS))
if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
num_cs > SPI_NUM_CHIPSELECTS)
master->num_chipselect = SPI_NUM_CHIPSELECTS;
else
master->num_chipselect = num_cs;
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
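Devicetree integer properties such as num-cs are stored as 32-bit cells, so reading one with of_property_read_u16() cannot yield the intended value; the fix reads the cell into a temporary u32 and only then narrows it into the u16 master->num_chipselect, keeping the same fallback to SPI_NUM_CHIPSELECTS when the property is absent or out of range.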


@ -1105,13 +1105,14 @@ void spi_finalize_current_message(struct spi_master *master)
"failed to unprepare message: %d\n", ret);
}
}
trace_spi_message_done(mesg);
master->cur_msg_prepared = false;
mesg->state = NULL;
if (mesg->complete)
mesg->complete(mesg->context);
trace_spi_message_done(mesg);
}
EXPORT_SYMBOL_GPL(spi_finalize_current_message);

Some files were not shown because too many files have changed in this diff Show More