Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/usb/asix_common.c
	drivers/net/usb/sr9800.c
	drivers/net/usb/usbnet.c
	include/linux/usb/usbnet.h
	net/ipv4/tcp_ipv4.c
	net/ipv6/tcp_ipv6.c

The TCP conflicts were overlapping changes. In 'net' we added a
READ_ONCE() to the socket cached RX route read, whilst in 'net-next'
Eric Dumazet touched the surrounding code dealing with how mini
sockets are handled.

With USB, it's a case of the same bug fix first going into net-next
and then I cherry picked it back into net.

Signed-off-by: David S. Miller <davem@davemloft.net>
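For context, READ_ONCE() is the kernel's annotation for forcing a single, non-torn load of a variable that readers access locklessly while a writer may update it concurrently; without it the compiler may reload or split the access. The following is only an illustrative sketch of that pattern, not the actual net change — struct example_sock and the function here are hypothetical stand-ins, while the real fix annotated the TCP socket's cached RX route read:

	#include <linux/compiler.h>	/* READ_ONCE() */

	/* Hypothetical structure, standing in for the real socket. */
	struct example_sock {
		struct dst_entry *rx_dst;	/* cached RX route, written locklessly */
	};

	static bool example_rx_dst_matches(struct example_sock *es,
					   struct dst_entry *dst)
	{
		/*
		 * Load the cached pointer exactly once so a concurrent
		 * writer cannot hand us two different snapshots of
		 * es->rx_dst within the same check.
		 */
		struct dst_entry *cached = READ_ONCE(es->rx_dst);

		return cached == dst;
	}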
commit 9f0d34bc34
MAINTAINERS | 39

@@ -1186,7 +1186,7 @@ M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-mvebu/
-F:	drivers/rtc/armada38x-rtc
+F:	drivers/rtc/rtc-armada38x.c

 ARM/Marvell Berlin SoC support
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
@@ -1362,6 +1362,7 @@ F:	drivers/i2c/busses/i2c-rk3x.c
 F:	drivers/*/*rockchip*
+F:	drivers/*/*/*rockchip*
 F:	sound/soc/rockchip/
 N:	rockchip

 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>
@@ -1675,8 +1676,8 @@ F:	drivers/misc/eeprom/at24.c
 F:	include/linux/platform_data/at24.h

 ATA OVER ETHERNET (AOE) DRIVER
-M:	"Ed L. Cashin" <ecashin@coraid.com>
-W:	http://support.coraid.com/support/linux
+M:	"Ed L. Cashin" <ed.cashin@acm.org>
+W:	http://www.openaoe.org/
 S:	Supported
 F:	Documentation/aoe/
 F:	drivers/block/aoe/
@@ -3252,6 +3253,13 @@ S:	Maintained
 F:	Documentation/hwmon/dme1737
 F:	drivers/hwmon/dme1737.c

+DMI/SMBIOS SUPPORT
+M:	Jean Delvare <jdelvare@suse.de>
+S:	Maintained
+F:	drivers/firmware/dmi-id.c
+F:	drivers/firmware/dmi_scan.c
+F:	include/linux/dmi.h
+
 DOCKING STATION DRIVER
 M:	Shaohua Li <shaohua.li@intel.com>
 L:	linux-acpi@vger.kernel.org
@@ -5128,22 +5136,21 @@ M:	Deepak Saxena <dsaxena@plexity.net>
 S:	Maintained
 F:	drivers/char/hw_random/ixp4xx-rng.c

-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/fm10k/igb/igbvf/ixgb/ixgbe/ixgbevf/i40e/i40evf)
+INTEL ETHERNET DRIVERS
 M:	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-M:	Jesse Brandeburg <jesse.brandeburg@intel.com>
-M:	Bruce Allan <bruce.w.allan@intel.com>
-M:	Carolyn Wyborny <carolyn.wyborny@intel.com>
-M:	Don Skidmore <donald.c.skidmore@intel.com>
-M:	Greg Rose <gregory.v.rose@intel.com>
-M:	Matthew Vick <matthew.vick@intel.com>
-M:	John Ronciak <john.ronciak@intel.com>
-M:	Mitch Williams <mitch.a.williams@intel.com>
-M:	Linux NICS <linux.nics@intel.com>
-L:	e1000-devel@lists.sourceforge.net
+R:	Jesse Brandeburg <jesse.brandeburg@intel.com>
+R:	Shannon Nelson <shannon.nelson@intel.com>
+R:	Carolyn Wyborny <carolyn.wyborny@intel.com>
+R:	Don Skidmore <donald.c.skidmore@intel.com>
+R:	Matthew Vick <matthew.vick@intel.com>
+R:	John Ronciak <john.ronciak@intel.com>
+R:	Mitch Williams <mitch.a.williams@intel.com>
+L:	intel-wired-lan@lists.osuosl.org
 W:	http://www.intel.com/support/feedback.htm
 W:	http://e1000.sourceforge.net/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next.git
+Q:	http://patchwork.ozlabs.org/project/intel-wired-lan/list/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
 S:	Supported
 F:	Documentation/networking/e100.txt
 F:	Documentation/networking/e1000.txt

Makefile | 2

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma sheep

 # *DOCUMENTATION*

@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
 	       sigset_t *set)
 {
 	int err;
-	err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));

@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	if (!err)
 		set_current_blocked(&set);

-	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));

 	return err;
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	/* Don't restart from sigreturn */
 	syscall_wont_restart(regs);

+	/*
+	 * Ensure that sigreturn always returns to user mode (in case the
+	 * regs saved on user stack got fudged between save and sigreturn)
+	 * Otherwise it is easy to panic the kernel with a custom
+	 * signal handler and/or restorer which clobberes the status32/ret
+	 * to return to a bogus location in kernel mode.
+	 */
+	regs->status32 |= STATUS_U_MASK;
+
 	return regs->r0;

 badframe:
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)

 	/*
 	 * handler returns using sigreturn stub provided already by userpsace
+	 * If not, nuke the process right away
 	 */
-	BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
+	if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
+		return 1;
+
 	regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;

 	/* User Stack for signal handler will be above the frame just carved */
@@ -296,12 +308,12 @@ static void
 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
 	sigset_t *oldset = sigmask_to_save();
-	int ret;
+	int failed;

 	/* Set up the stack frame */
-	ret = setup_rt_frame(ksig, oldset, regs);
+	failed = setup_rt_frame(ksig, oldset, regs);

-	signal_setup_done(ret, ksig, 0);
+	signal_setup_done(failed, ksig, 0);
 }

 void do_signal(struct pt_regs *regs)

@@ -619,6 +619,7 @@ config ARCH_PXA
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
 	select HAVE_IDE
+	select IRQ_DOMAIN
 	select MULTI_IRQ_HANDLER
 	select PLAT_PXA
 	select SPARSE_IRQ

@@ -36,6 +36,20 @@ DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0) /* SPI_D1 */
 		>;
 	};

+	mmc_pins: pinmux_mmc_pins {
+		pinctrl-single,pins = <
+			DM816X_IOPAD(0x0a70, MUX_MODE0)		/* SD_POW */
+			DM816X_IOPAD(0x0a74, MUX_MODE0)		/* SD_CLK */
+			DM816X_IOPAD(0x0a78, MUX_MODE0)		/* SD_CMD */
+			DM816X_IOPAD(0x0a7C, MUX_MODE0)		/* SD_DAT0 */
+			DM816X_IOPAD(0x0a80, MUX_MODE0)		/* SD_DAT1 */
+			DM816X_IOPAD(0x0a84, MUX_MODE0)		/* SD_DAT2 */
+			DM816X_IOPAD(0x0a88, MUX_MODE0)		/* SD_DAT2 */
+			DM816X_IOPAD(0x0a8c, MUX_MODE2)		/* GP1[7] */
+			DM816X_IOPAD(0x0a90, MUX_MODE2)		/* GP1[8] */
+		>;
+	};
+
 	usb0_pins: pinmux_usb0_pins {
 		pinctrl-single,pins = <
 			DM816X_IOPAD(0x0d00, MUX_MODE0)		/* USB0_DRVVBUS */
@@ -137,7 +151,12 @@ m25p80@0 {
 };

 &mmc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc_pins>;
 	vmmc-supply = <&vmmcsd_fixed>;
 	bus-width = <4>;
+	cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
 };

+/* At least dm8168-evm rev c won't support multipoint, later may */

@@ -150,17 +150,27 @@ elm: elm@48080000 {
 };

 gpio1: gpio@48032000 {
-	compatible = "ti,omap3-gpio";
+	compatible = "ti,omap4-gpio";
 	ti,hwmods = "gpio1";
+	ti,gpio-always-on;
 	reg = <0x48032000 0x1000>;
-	interrupts = <97>;
+	interrupts = <96>;
+	gpio-controller;
+	#gpio-cells = <2>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
 };

 gpio2: gpio@4804c000 {
-	compatible = "ti,omap3-gpio";
+	compatible = "ti,omap4-gpio";
 	ti,hwmods = "gpio2";
+	ti,gpio-always-on;
 	reg = <0x4804c000 0x1000>;
-	interrupts = <99>;
+	interrupts = <98>;
+	gpio-controller;
+	#gpio-cells = <2>;
+	interrupt-controller;
+	#interrupt-cells = <2>;
 };

 gpmc: gpmc@50000000 {

@@ -1111,7 +1111,6 @@ pcie1_phy: pciephy@4a094000 {
 			    "wkupclk", "refclk",
 			    "div-clk", "phy-div";
 	#phy-cells = <0>;
-	ti,hwmods = "pcie1-phy";
 };

 pcie2_phy: pciephy@4a095000 {
@@ -1130,7 +1129,6 @@ pcie2_phy: pciephy@4a095000 {
 			    "wkupclk", "refclk",
 			    "div-clk", "phy-div";
 	#phy-cells = <0>;
-	ti,hwmods = "pcie2-phy";
 	status = "disabled";
 };
 };

@@ -92,6 +92,8 @@ aes: aes@480c5000 {
 	ti,hwmods = "aes";
 	reg = <0x480c5000 0x50>;
 	interrupts = <0>;
+	dmas = <&sdma 65 &sdma 66>;
+	dma-names = "tx", "rx";
 };

 prm: prm@48306000 {
@@ -550,6 +552,8 @@ sham: sham@480c3000 {
 	ti,hwmods = "sham";
 	reg = <0x480c3000 0x64>;
 	interrupts = <49>;
+	dmas = <&sdma 69>;
+	dma-names = "rx";
 };

 smartreflex_core: smartreflex@480cb000 {

@@ -411,6 +411,7 @@ gmac: ethernet@ff290000 {
 		      "mac_clk_rx", "mac_clk_tx",
 		      "clk_mac_ref", "clk_mac_refout",
 		      "aclk_mac", "pclk_mac";
+	status = "disabled";
 };

 usb_host0_ehci: usb@ff500000 {

@@ -660,7 +660,7 @@ spi1: spi@fff01000 {
 	#address-cells = <1>;
 	#size-cells = <0>;
 	reg = <0xfff01000 0x1000>;
-	interrupts = <0 156 4>;
+	interrupts = <0 155 4>;
 	num-cs = <4>;
 	clocks = <&spi_m_clk>;
 	status = "disabled";

@@ -56,6 +56,22 @@ / {
 	model = "Olimex A10-OLinuXino-LIME";
 	compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";

+	cpus {
+		cpu0: cpu@0 {
+			/*
+			 * The A10-Lime is known to be unstable
+			 * when running at 1008 MHz
+			 */
+			operating-points = <
+				/* kHz	  uV */
+				912000	1350000
+				864000	1300000
+				624000	1250000
+				>;
+			cooling-max-level = <2>;
+		};
+	};
+
 	soc@01c00000 {
 		emac: ethernet@01c0b000 {
 			pinctrl-names = "default";

@@ -75,7 +75,6 @@ cpu0: cpu@0 {
 	clock-latency = <244144>; /* 8 32k periods */
 	operating-points = <
 		/* kHz	  uV */
-		1056000	1500000
 		1008000	1400000
 		912000	1350000
 		864000	1300000
@@ -83,7 +82,7 @@ cpu0: cpu@0 {
 		>;
 	#cooling-cells = <2>;
 	cooling-min-level = <0>;
-	cooling-max-level = <4>;
+	cooling-max-level = <3>;
 };
 };

@@ -47,7 +47,6 @@ cpu0: cpu@0 {
 	clock-latency = <244144>; /* 8 32k periods */
 	operating-points = <
 		/* kHz	  uV */
-		1104000	1500000
 		1008000	1400000
 		912000	1350000
 		864000	1300000
@@ -57,7 +56,7 @@ cpu0: cpu@0 {
 		>;
 	#cooling-cells = <2>;
 	cooling-min-level = <0>;
-	cooling-max-level = <6>;
+	cooling-max-level = <5>;
 };
 };

@@ -105,7 +105,6 @@ cpu0: cpu@0 {
 	clock-latency = <244144>; /* 8 32k periods */
 	operating-points = <
 		/* kHz	  uV */
-		1008000	1450000
 		960000	1400000
 		912000	1400000
 		864000	1300000
@@ -116,7 +115,7 @@ cpu0: cpu@0 {
 		>;
 	#cooling-cells = <2>;
 	cooling-min-level = <0>;
-	cooling-max-level = <7>;
+	cooling-max-level = <6>;
 };

 cpu@1 {

@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
 		return kasprintf(GFP_KERNEL, "OMAP4");
 	else if (soc_is_omap54xx())
 		return kasprintf(GFP_KERNEL, "OMAP5");
+	else if (soc_is_am33xx() || soc_is_am335x())
+		return kasprintf(GFP_KERNEL, "AM33xx");
 	else if (soc_is_am43xx())
 		return kasprintf(GFP_KERNEL, "AM43xx");
 	else if (soc_is_dra7xx())

@@ -11,6 +11,7 @@
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
+#include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -40,7 +41,6 @@
 #define ICHP_VAL_IRQ		(1 << 31)
 #define ICHP_IRQ(i)		(((i) >> 16) & 0x7fff)
 #define IPR_VALID		(1 << 31)
-#define IRQ_BIT(n)		(((n) - PXA_IRQ(0)) & 0x1f)

 #define MAX_INTERNAL_IRQS	128

@@ -51,6 +51,7 @@
 static void __iomem *pxa_irq_base;
 static int pxa_internal_irq_nr;
 static bool cpu_has_ipr;
+static struct irq_domain *pxa_irq_domain;

 static inline void __iomem *irq_base(int i)
 {
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
 void pxa_mask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t irq = irqd_to_hwirq(d);
 	uint32_t icmr = __raw_readl(base + ICMR);

-	icmr &= ~(1 << IRQ_BIT(d->irq));
+	icmr &= ~BIT(irq & 0x1f);
 	__raw_writel(icmr, base + ICMR);
 }

 void pxa_unmask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t irq = irqd_to_hwirq(d);
 	uint32_t icmr = __raw_readl(base + ICMR);

-	icmr |= 1 << IRQ_BIT(d->irq);
+	icmr |= BIT(irq & 0x1f);
 	__raw_writel(icmr, base + ICMR);
 }

@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
 	} while (1);
 }

-void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
+		       irq_hw_number_t hw)
 {
-	int irq, i, n;
+	void __iomem *base = irq_base(hw / 32);

-	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+	/* initialize interrupt priority */
+	if (cpu_has_ipr)
+		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
+
+	irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(virq, base);
+	set_irq_flags(virq, IRQF_VALID);
+
+	return 0;
+}
+
+static struct irq_domain_ops pxa_irq_ops = {
+	.map    = pxa_irq_map,
+	.xlate  = irq_domain_xlate_onecell,
+};
+
+static __init void
+pxa_init_irq_common(struct device_node *node, int irq_nr,
+		    int (*fn)(struct irq_data *, unsigned int))
+{
+	int n;

 	pxa_internal_irq_nr = irq_nr;
-	cpu_has_ipr = !cpu_is_pxa25x();
-	pxa_irq_base = io_p2v(0x40d00000);
+	pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
+					       PXA_IRQ(0), 0,
+					       &pxa_irq_ops, NULL);
+	if (!pxa_irq_domain)
+		panic("Unable to add PXA IRQ domain\n");
+	irq_set_default_host(pxa_irq_domain);

 	for (n = 0; n < irq_nr; n += 32) {
 		void __iomem *base = irq_base(n >> 5);

 		__raw_writel(0, base + ICMR);	/* disable all IRQs */
 		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
-		for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
-			/* initialize interrupt priority */
-			if (cpu_has_ipr)
-				__raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
-
-			irq = PXA_IRQ(i);
-			irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
-						 handle_level_irq);
-			irq_set_chip_data(irq, base);
-			set_irq_flags(irq, IRQF_VALID);
-		}
 	}

 	/* only unmasked interrupts kick us out of idle */
 	__raw_writel(1, irq_base(0) + ICCR);

 	pxa_internal_irq_chip.irq_set_wake = fn;
 }
+
+void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+{
+	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+
+	pxa_irq_base = io_p2v(0x40d00000);
+	cpu_has_ipr = !cpu_is_pxa25x();
+	pxa_init_irq_common(NULL, irq_nr, fn);
+}

@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
 };

 #ifdef CONFIG_OF
-static struct irq_domain *pxa_irq_domain;
-
-static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
-		       irq_hw_number_t hw)
-{
-	void __iomem *base = irq_base(hw / 32);
-
-	/* initialize interrupt priority */
-	if (cpu_has_ipr)
-		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
-
-	irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
-				 handle_level_irq);
-	irq_set_chip_data(hw, base);
-	set_irq_flags(hw, IRQF_VALID);
-
-	return 0;
-}
-
-static struct irq_domain_ops pxa_irq_ops = {
-	.map    = pxa_irq_map,
-	.xlate  = irq_domain_xlate_onecell,
-};
-
 static const struct of_device_id intc_ids[] __initconst = {
 	{ .compatible = "marvell,pxa-intc", },
 	{}
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
 {
 	struct device_node *node;
 	struct resource res;
-	int n, ret;
+	int ret;

 	node = of_find_matching_node(NULL, intc_ids);
 	if (!node) {
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
 		return;
 	}

-	pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0,
-					       &pxa_irq_ops, NULL);
-	if (!pxa_irq_domain)
-		panic("Unable to add PXA IRQ domain\n");
-
-	irq_set_default_host(pxa_irq_domain);
-
-	for (n = 0; n < pxa_internal_irq_nr; n += 32) {
-		void __iomem *base = irq_base(n >> 5);
-
-		__raw_writel(0, base + ICMR);	/* disable all IRQs */
-		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
-	}
-
-	/* only unmasked interrupts kick us out of idle */
-	__raw_writel(1, irq_base(0) + ICCR);
-
-	pxa_internal_irq_chip.irq_set_wake = fn;
+	pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
 }
 #endif /* CONFIG_OF */

@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
 };

 static struct platform_device can_regulator_device = {
-	.name	= "reg-fixed-volage",
+	.name	= "reg-fixed-voltage",
 	.id	= 0,
 	.dev	= {
 		.platform_data	= &can_regulator_pdata,

@@ -1,10 +1,12 @@
 menuconfig ARCH_SUNXI
 	bool "Allwinner SoCs" if ARCH_MULTI_V7
 	select ARCH_REQUIRE_GPIOLIB
+	select ARCH_HAS_RESET_CONTROLLER
 	select CLKSRC_MMIO
 	select GENERIC_IRQ_CHIP
 	select PINCTRL
 	select SUN4I_TIMER
+	select RESET_CONTROLLER

 if ARCH_SUNXI

@@ -20,10 +22,8 @@ config MACH_SUN5I
 config MACH_SUN6I
 	bool "Allwinner A31 (sun6i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
 	select MFD_SUN6I_PRCM
-	select RESET_CONTROLLER
 	select SUN5I_HSTIMER

 config MACH_SUN7I
@@ -37,16 +37,12 @@ config MACH_SUN7I
 config MACH_SUN8I
 	bool "Allwinner A23 (sun8i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
 	select MFD_SUN6I_PRCM
-	select RESET_CONTROLLER

 config MACH_SUN9I
 	bool "Allwinner (sun9i) SoCs support"
 	default ARCH_SUNXI
-	select ARCH_HAS_RESET_CONTROLLER
 	select ARM_GIC
-	select RESET_CONTROLLER

 endif

@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	const struct of_device_id *match;
 	const struct dmtimer_platform_data *pdata;
+	int ret;

 	match = of_match_device(of_match_ptr(omap_timer_match), dev);
 	pdata = match ? match->data : dev->platform_data;
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	}

 	if (!timer->reserved) {
-		pm_runtime_get_sync(dev);
+		ret = pm_runtime_get_sync(dev);
+		if (ret < 0) {
+			dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
+				__func__);
+			goto err_get_sync;
+		}
 		__omap_dm_timer_init_regs(timer);
 		pm_runtime_put(dev);
 	}
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	dev_dbg(dev, "Device Probed.\n");

 	return 0;
+
+err_get_sync:
+	pm_runtime_put_noidle(dev);
+	pm_runtime_disable(dev);
+	return ret;
 }

 /**
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
 	}
 	spin_unlock_irqrestore(&dm_timer_lock, flags);

+	pm_runtime_disable(&pdev->dev);
+
 	return ret;
 }

@@ -8,7 +8,7 @@
 */

 /* SoC fixed clocks */
-soc_uartclk: refclk72738khz {
+soc_uartclk: refclk7273800hz {
 	compatible = "fixed-clock";
 	#clock-cells = <0>;
 	clock-frequency = <7273800>;

@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 	__ret; \
 })

-#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-	cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
-				o1, o2, n1, n2)
+#define _protect_cmpxchg_local(pcp, o, n)			\
+({								\
+	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
+	preempt_disable();					\
+	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
+	preempt_enable();					\
+	__ret;							\
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
+({									\
+	int __ret;							\
+	preempt_disable();						\
+	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
+					raw_cpu_ptr(&(ptr2)),		\
+					o1, o2, n1, n2);		\
+	preempt_enable();						\
+	__ret;								\
+})

 #define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	unsigned int cpu = smp_processor_id();

+	/*
+	 * init_mm.pgd does not contain any user mappings and it is always
+	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
+	 */
+	if (next == &init_mm) {
+		cpu_set_reserved_ttbr0();
+		return;
+	}
+
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
 		check_and_switch_context(next, tsk);
 }

@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
 	return ret;
 }

-#define _percpu_add(pcp, val) \
-	__percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+#define _percpu_read(pcp)						\
+({									\
+	typeof(pcp) __retval;						\
+	preempt_disable();						\
+	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),	\
+					      sizeof(pcp));		\
+	preempt_enable();						\
+	__retval;							\
+})

-#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+#define _percpu_write(pcp, val)						\
+do {									\
+	preempt_disable();						\
+	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),	\
+				sizeof(pcp));				\
+	preempt_enable();						\
+} while(0)								\
+
+#define _pcp_protect(operation, pcp, val)			\
+({								\
+	typeof(pcp) __retval;					\
+	preempt_disable();					\
+	__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),	\
+					  (val), sizeof(pcp));	\
+	preempt_enable();					\
+	__retval;						\
+})
+
+#define _percpu_add(pcp, val) \
+	_pcp_protect(__percpu_add, pcp, val)
+
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)

 #define _percpu_and(pcp, val) \
-	__percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+	_pcp_protect(__percpu_and, pcp, val)

 #define _percpu_or(pcp, val) \
-	__percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
-
-#define _percpu_read(pcp) (typeof(pcp))	\
-	(__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
-
-#define _percpu_write(pcp, val) \
-	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+	_pcp_protect(__percpu_or, pcp, val)

 #define _percpu_xchg(pcp, val) (typeof(pcp)) \
-	(__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+	_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))

 #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
 #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)

@@ -2,6 +2,7 @@
 #define _ASM_METAG_IO_H

 #include <linux/types.h>
+#include <asm/pgtable-bits.h>

 #define IO_SPACE_LIMIT  0

@@ -0,0 +1,104 @@
+/*
+ * Meta page table definitions.
+ */
+
+#ifndef _METAG_PGTABLE_BITS_H
+#define _METAG_PGTABLE_BITS_H
+
+#include <asm/metag_mem.h>
+
+/*
+ * Definitions for MMU descriptors
+ *
+ * These are the hardware bits in the MMCU pte entries.
+ * Derived from the Meta toolkit headers.
+ */
+#define _PAGE_PRESENT		MMCU_ENTRY_VAL_BIT
+#define _PAGE_WRITE		MMCU_ENTRY_WR_BIT
+#define _PAGE_PRIV		MMCU_ENTRY_PRIV_BIT
+/* Write combine bit - this can cause writes to occur out of order */
+#define _PAGE_WR_COMBINE	MMCU_ENTRY_WRC_BIT
+/* Sys coherent bit - this bit is never used by Linux */
+#define _PAGE_SYS_COHERENT	MMCU_ENTRY_SYS_BIT
+#define _PAGE_ALWAYS_ZERO_1	0x020
+#define _PAGE_CACHE_CTRL0	0x040
+#define _PAGE_CACHE_CTRL1	0x080
+#define _PAGE_ALWAYS_ZERO_2	0x100
+#define _PAGE_ALWAYS_ZERO_3	0x200
+#define _PAGE_ALWAYS_ZERO_4	0x400
+#define _PAGE_ALWAYS_ZERO_5	0x800
+
+/* These are software bits that we stuff into the gaps in the hardware
+ * pte entries that are not used. Note, these DO get stored in the actual
+ * hardware, but the hardware just does not use them.
+ */
+#define _PAGE_ACCESSED		_PAGE_ALWAYS_ZERO_1
+#define _PAGE_DIRTY		_PAGE_ALWAYS_ZERO_2
+
+/* Pages owned, and protected by, the kernel. */
+#define _PAGE_KERNEL		_PAGE_PRIV
+
+/* No cacheing of this page */
+#define _PAGE_CACHE_WIN0	(MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
+/* burst cacheing - good for data streaming */
+#define _PAGE_CACHE_WIN1	(MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
+/* One cache way per thread */
+#define _PAGE_CACHE_WIN2	(MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
+/* Full on cacheing */
+#define _PAGE_CACHE_WIN3	(MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
+
+#define _PAGE_CACHEABLE		(_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
+
+/* which bits are used for cache control ... */
+#define _PAGE_CACHE_MASK	(_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
+				 _PAGE_WR_COMBINE)
+
+/* This is a mask of the bits that pte_modify is allowed to change. */
+#define _PAGE_CHG_MASK		(PAGE_MASK)
+
+#define _PAGE_SZ_SHIFT		1
+#define _PAGE_SZ_4K		(0x0)
+#define _PAGE_SZ_8K		(0x1 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_16K		(0x2 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_32K		(0x3 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_64K		(0x4 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_128K		(0x5 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_256K		(0x6 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_512K		(0x7 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_1M		(0x8 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_2M		(0x9 << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_4M		(0xa << _PAGE_SZ_SHIFT)
+#define _PAGE_SZ_MASK		(0xf << _PAGE_SZ_SHIFT)
+
+#if defined(CONFIG_PAGE_SIZE_4K)
+#define _PAGE_SZ		(_PAGE_SZ_4K)
+#elif defined(CONFIG_PAGE_SIZE_8K)
+#define _PAGE_SZ		(_PAGE_SZ_8K)
+#elif defined(CONFIG_PAGE_SIZE_16K)
+#define _PAGE_SZ		(_PAGE_SZ_16K)
+#endif
+#define _PAGE_TABLE		(_PAGE_SZ | _PAGE_PRESENT)
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_8K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_16K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_32K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_64K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_128K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_256K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+# define _PAGE_SZHUGE		(_PAGE_SZ_512K)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
+# define _PAGE_SZHUGE		(_PAGE_SZ_1M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
+# define _PAGE_SZHUGE		(_PAGE_SZ_2M)
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
+# define _PAGE_SZHUGE		(_PAGE_SZ_4M)
+#endif
+
+#endif /* _METAG_PGTABLE_BITS_H */

@@ -5,6 +5,7 @@
 #ifndef _METAG_PGTABLE_H
 #define _METAG_PGTABLE_H

+#include <asm/pgtable-bits.h>
 #include <asm-generic/pgtable-nopmd.h>

 /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */
@@ -20,100 +21,6 @@
 #define VMALLOC_END		0x7FFFFFFF
 #endif

-/*
- * Definitions for MMU descriptors
- *
- * These are the hardware bits in the MMCU pte entries.
- * Derived from the Meta toolkit headers.
- */
-#define _PAGE_PRESENT		MMCU_ENTRY_VAL_BIT
-#define _PAGE_WRITE		MMCU_ENTRY_WR_BIT
-#define _PAGE_PRIV		MMCU_ENTRY_PRIV_BIT
-/* Write combine bit - this can cause writes to occur out of order */
-#define _PAGE_WR_COMBINE	MMCU_ENTRY_WRC_BIT
-/* Sys coherent bit - this bit is never used by Linux */
-#define _PAGE_SYS_COHERENT	MMCU_ENTRY_SYS_BIT
-#define _PAGE_ALWAYS_ZERO_1	0x020
-#define _PAGE_CACHE_CTRL0	0x040
-#define _PAGE_CACHE_CTRL1	0x080
-#define _PAGE_ALWAYS_ZERO_2	0x100
-#define _PAGE_ALWAYS_ZERO_3	0x200
-#define _PAGE_ALWAYS_ZERO_4	0x400
-#define _PAGE_ALWAYS_ZERO_5	0x800
-
-/* These are software bits that we stuff into the gaps in the hardware
- * pte entries that are not used. Note, these DO get stored in the actual
- * hardware, but the hardware just does not use them.
- */
-#define _PAGE_ACCESSED		_PAGE_ALWAYS_ZERO_1
-#define _PAGE_DIRTY		_PAGE_ALWAYS_ZERO_2
-
-/* Pages owned, and protected by, the kernel. */
-#define _PAGE_KERNEL		_PAGE_PRIV
-
-/* No cacheing of this page */
-#define _PAGE_CACHE_WIN0	(MMCU_CWIN_UNCACHED << MMCU_ENTRY_CWIN_S)
-/* burst cacheing - good for data streaming */
-#define _PAGE_CACHE_WIN1	(MMCU_CWIN_BURST << MMCU_ENTRY_CWIN_S)
-/* One cache way per thread */
-#define _PAGE_CACHE_WIN2	(MMCU_CWIN_C1SET << MMCU_ENTRY_CWIN_S)
-/* Full on cacheing */
-#define _PAGE_CACHE_WIN3	(MMCU_CWIN_CACHED << MMCU_ENTRY_CWIN_S)
-
-#define _PAGE_CACHEABLE		(_PAGE_CACHE_WIN3 | _PAGE_WR_COMBINE)
-
-/* which bits are used for cache control ... */
-#define _PAGE_CACHE_MASK	(_PAGE_CACHE_CTRL0 | _PAGE_CACHE_CTRL1 | \
-				 _PAGE_WR_COMBINE)
-
-/* This is a mask of the bits that pte_modify is allowed to change. */
-#define _PAGE_CHG_MASK		(PAGE_MASK)
-
-#define _PAGE_SZ_SHIFT		1
-#define _PAGE_SZ_4K		(0x0)
-#define _PAGE_SZ_8K		(0x1 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_16K		(0x2 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_32K		(0x3 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_64K		(0x4 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_128K		(0x5 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_256K		(0x6 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_512K		(0x7 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_1M		(0x8 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_2M		(0x9 << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_4M		(0xa << _PAGE_SZ_SHIFT)
-#define _PAGE_SZ_MASK		(0xf << _PAGE_SZ_SHIFT)
-
-#if defined(CONFIG_PAGE_SIZE_4K)
-#define _PAGE_SZ		(_PAGE_SZ_4K)
-#elif defined(CONFIG_PAGE_SIZE_8K)
-#define _PAGE_SZ		(_PAGE_SZ_8K)
-#elif defined(CONFIG_PAGE_SIZE_16K)
-#define _PAGE_SZ		(_PAGE_SZ_16K)
-#endif
-#define _PAGE_TABLE		(_PAGE_SZ | _PAGE_PRESENT)
-
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_8K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_8K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_16K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_16K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_32K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_32K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_64K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_128K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_128K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_256K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
-# define _PAGE_SZHUGE		(_PAGE_SZ_512K)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1M)
-# define _PAGE_SZHUGE		(_PAGE_SZ_1M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_2M)
-# define _PAGE_SZHUGE		(_PAGE_SZ_2M)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4M)
-# define _PAGE_SZHUGE		(_PAGE_SZ_4M)
-#endif
-
 /*
  * The Linux memory management assumes a three-level page table setup. On
  * Meta, we use that, but "fold" the mid level into the top-level page

@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)

 	if (likely(pgd != NULL)) {
 		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 		actual_pgd += PTRS_PER_PGD;
 		/* Populate first pmd with allocated memory.  We mark it
 		 * with PxD_FLAG_ATTACHED as a signal to the system that this
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)

 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	pgd -= PTRS_PER_PGD;
 #endif
 	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)

 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
 #ifdef CONFIG_64BIT
 	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-		/* This is the permanent pmd attached to the pgd;
-		 * cannot free it */
+		/*
+		 * This is the permanent pmd attached to the pgd;
+		 * cannot free it.
+		 * Increment the counter to compensate for the decrement
+		 * done by generic mm code.
+		 */
+		mm_inc_nr_pmds(mm);
 		return;
 #endif
 	free_pages((unsigned long)pmd, PMD_ORDER);
 }

@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	/* preserve the gateway marker if this is the beginning of
 	 * the permanent pmd */
 	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)

@@ -55,8 +55,8 @@
 #define ENTRY_COMP(_name_) .word sys_##_name_
 #endif

-	ENTRY_SAME(restart_syscall)	/* 0 */
-	ENTRY_SAME(exit)
+90:	ENTRY_SAME(restart_syscall)	/* 0 */
+91:	ENTRY_SAME(exit)
 	ENTRY_SAME(fork_wrapper)
 	ENTRY_SAME(read)
 	ENTRY_SAME(write)
@@ -439,7 +439,10 @@
 	ENTRY_SAME(bpf)
 	ENTRY_COMP(execveat)

 	/* Nothing yet */

+	.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
+	.error "size of syscall table does not fit value of __NR_Linux_syscalls"
+	.endif

 #undef ENTRY_SAME
 #undef ENTRY_DIFF

@@ -153,6 +153,7 @@
 #define PPC_INST_MFSPR_PVR_MASK		0xfc1fffff
 #define PPC_INST_MFTMR			0x7c0002dc
 #define PPC_INST_MSGSND			0x7c00019c
+#define PPC_INST_MSGCLR			0x7c0001dc
 #define PPC_INST_MSGSNDP		0x7c00011c
 #define PPC_INST_MTTMR			0x7c0003dc
 #define PPC_INST_NOP			0x60000000
@@ -311,6 +312,8 @@
 					___PPC_RB(b) | __PPC_EH(eh))
 #define PPC_MSGSND(b)		stringify_in_c(.long PPC_INST_MSGSND | \
 					___PPC_RB(b))
+#define PPC_MSGCLR(b)		stringify_in_c(.long PPC_INST_MSGCLR | \
+					___PPC_RB(b))
 #define PPC_MSGSNDP(b)		stringify_in_c(.long PPC_INST_MSGSNDP | \
 					___PPC_RB(b))
 #define PPC_POPCNTB(a, s)	stringify_in_c(.long PPC_INST_POPCNTB | \

@@ -608,13 +608,16 @@
 #define   SRR1_ISI_N_OR_G	0x10000000 /* ISI: Access is no-exec or G */
 #define   SRR1_ISI_PROT		0x08000000 /* ISI: Other protection fault */
 #define   SRR1_WAKEMASK		0x00380000 /* reason for wakeup */
+#define   SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 */
 #define   SRR1_WAKESYSERR	0x00300000 /* System error */
 #define   SRR1_WAKEEE		0x00200000 /* External interrupt */
 #define   SRR1_WAKEMT		0x00280000 /* mtctrl */
 #define   SRR1_WAKEHMI		0x00280000 /* Hypervisor maintenance */
 #define   SRR1_WAKEDEC		0x00180000 /* Decrementer interrupt */
+#define   SRR1_WAKEDBELL	0x00140000 /* Privileged doorbell on P8 */
 #define   SRR1_WAKETHERM	0x00100000 /* Thermal management interrupt */
 #define   SRR1_WAKERESET	0x00100000 /* System reset */
+#define   SRR1_WAKEHDBELL	0x000c0000 /* Hypervisor doorbell on P8 */
 #define   SRR1_WAKESTATE	0x00030000 /* Powersave exit mask [46:47] */
 #define   SRR1_WS_DEEPEST	0x00030000 /* Some resources not maintained,
 					    * may not be recoverable */

@@ -437,6 +437,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check_early	= __machine_check_early_realmode_p8,
 		.platform		= "power8",
 	},
+	{	/* Power8NVL */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x004c0000,
+		.cpu_name		= "POWER8NVL (raw)",
+		.cpu_features		= CPU_FTRS_POWER8,
+		.cpu_user_features	= COMMON_USER_POWER8,
+		.cpu_user_features2	= COMMON_USER2_POWER8,
+		.mmu_features		= MMU_FTRS_POWER8,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power8",
+		.oprofile_type		= PPC_OPROFILE_INVALID,
+		.cpu_setup		= __setup_cpu_power8,
+		.cpu_restore		= __restore_cpu_power8,
+		.flush_tlb		= __flush_tlb_power8,
+		.machine_check_early	= __machine_check_early_realmode_p8,
+		.platform		= "power8",
+	},
 	{	/* Power8 DD1: Does not support doorbell IPIs */
 		.pvr_mask		= 0xffffff00,
 		.pvr_value		= 0x004d0100,

@@ -17,6 +17,7 @@

 #include <asm/dbell.h>
 #include <asm/irq_regs.h>
+#include <asm/kvm_ppc.h>

 #ifdef CONFIG_SMP
 void doorbell_setup_this_cpu(void)
@@ -41,6 +42,7 @@ void doorbell_exception(struct pt_regs *regs)

 	may_hard_irq_enable();

+	kvmppc_set_host_ipi(smp_processor_id(), 0);
 	__this_cpu_inc(irq_stat.doorbell_irqs);

 	smp_ipi_demux();

@@ -1408,7 +1408,7 @@ machine_check_handle_early:
 	bne	9f			/* continue in V mode if we are. */

 5:
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 	/*
 	 * We are coming from kernel context. Check if we are coming from
 	 * guest. if yes, then we can continue. We will fall through

@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
 	if (lppaca)
-		yield_count = lppaca->yield_count;
+		yield_count = be32_to_cpu(lppaca->yield_count);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
 	return yield_count;
 }
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
 static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 		bool preserve_top32)
 {
+	struct kvm *kvm = vcpu->kvm;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	u64 mask;

+	mutex_lock(&kvm->lock);
 	spin_lock(&vc->lock);
 	/*
 	 * If ILE (interrupt little-endian) has changed, update the
 	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
 	 */
 	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
-		struct kvm *kvm = vcpu->kvm;
 		struct kvm_vcpu *vcpu;
 		int i;

-		mutex_lock(&kvm->lock);
 		kvm_for_each_vcpu(i, vcpu, kvm) {
 			if (vcpu->arch.vcore != vc)
 				continue;
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 			else
 				vcpu->arch.intr_msr &= ~MSR_LE;
 		}
-		mutex_unlock(&kvm->lock);
 	}

 	/*
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 	mask &= 0xFFFFFFFF;
 	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
 	spin_unlock(&vc->lock);
+	mutex_unlock(&kvm->lock);
 }

 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,

@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	/* Save HEIR (HV emulation assist reg) in emul_inst
 	   if this is an HEI (HV emulation interrupt, e40) */
 	li	r3,KVM_INST_FETCH_FAILED
+	stw	r3,VCPU_LAST_INST(r9)
 	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
 	bne	11f
 	mfspr	r3,SPRN_HEIR

@@ -33,6 +33,8 @@
 #include <asm/runlatch.h>
 #include <asm/code-patching.h>
 #include <asm/dbell.h>
+#include <asm/kvm_ppc.h>
+#include <asm/ppc-opcode.h>

 #include "powernv.h"

@@ -149,7 +151,7 @@ static int pnv_smp_cpu_disable(void)
 static void pnv_smp_cpu_kill_self(void)
 {
 	unsigned int cpu;
-	unsigned long srr1;
+	unsigned long srr1, wmask;
 	u32 idle_states;

 	/* Standard hot unplug procedure */
@@ -161,6 +163,10 @@ static void pnv_smp_cpu_kill_self(void)
 	generic_set_cpu_dead(cpu);
 	smp_wmb();

+	wmask = SRR1_WAKEMASK;
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		wmask = SRR1_WAKEMASK_P8;
+
 	idle_states = pnv_get_supported_cpuidle_states();
 	/* We don't want to take decrementer interrupts while we are offline,
 	 * so clear LPCR:PECE1. We keep PECE2 enabled.
@@ -191,10 +197,14 @@ static void pnv_smp_cpu_kill_self(void)
 		 * having finished executing in a KVM guest, then srr1
 		 * contains 0.
 		 */
-		if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
+		if ((srr1 & wmask) == SRR1_WAKEEE) {
 			icp_native_flush_interrupt();
 			local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
 			smp_mb();
+		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
+			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+			asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
+			kvmppc_set_host_ipi(cpu, 0);
 		}

 		if (cpu_core_split_required())

@@ -25,10 +25,10 @@
 static struct kobject *mobility_kobj;

 struct update_props_workarea {
-	u32 phandle;
-	u32 state;
-	u64 reserved;
-	u32 nprops;
+	__be32 phandle;
+	__be32 state;
+	__be64 reserved;
+	__be32 nprops;
 } __packed;

 #define NODE_ACTION_MASK	0xff000000
@@ -54,11 +54,11 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
 	return rc;
 }

-static int delete_dt_node(u32 phandle)
+static int delete_dt_node(__be32 phandle)
 {
 	struct device_node *dn;

-	dn = of_find_node_by_phandle(phandle);
+	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
 	if (!dn)
 		return -ENOENT;

@@ -127,7 +127,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
 	return 0;
 }

-static int update_dt_node(u32 phandle, s32 scope)
+static int update_dt_node(__be32 phandle, s32 scope)
 {
 	struct update_props_workarea *upwa;
 	struct device_node *dn;
@@ -136,6 +136,7 @@ static int update_dt_node(u32 phandle, s32 scope)
 	char *prop_data;
 	char *rtas_buf;
 	int update_properties_token;
+	u32 nprops;
 	u32 vd;

 	update_properties_token = rtas_token("ibm,update-properties");
@@ -146,7 +147,7 @@ static int update_dt_node(u32 phandle, s32 scope)
 	if (!rtas_buf)
 		return -ENOMEM;

-	dn = of_find_node_by_phandle(phandle);
+	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
 	if (!dn) {
 		kfree(rtas_buf);
 		return -ENOENT;
@@ -162,6 +163,7 @@ static int update_dt_node(u32 phandle, s32 scope)
 			break;

 		prop_data = rtas_buf + sizeof(*upwa);
+		nprops = be32_to_cpu(upwa->nprops);

 		/* On the first call to ibm,update-properties for a node the
 		 * the first property value descriptor contains an empty
@@ -170,17 +172,17 @@ static int update_dt_node(u32 phandle, s32 scope)
 		 */
 		if (*prop_data == 0) {
 			prop_data++;
-			vd = *(u32 *)prop_data;
+			vd = be32_to_cpu(*(__be32 *)prop_data);
 			prop_data += vd + sizeof(vd);
-			upwa->nprops--;
+			nprops--;
 		}

-		for (i = 0; i < upwa->nprops; i++) {
+		for (i = 0; i < nprops; i++) {
 			char *prop_name;

 			prop_name = prop_data;
 			prop_data += strlen(prop_name) + 1;
-			vd = *(u32 *)prop_data;
+			vd = be32_to_cpu(*(__be32 *)prop_data);
 			prop_data += sizeof(vd);

 			switch (vd) {
@@ -212,13 +214,13 @@ static int update_dt_node(u32 phandle, s32 scope)
 	return 0;
 }

-static int add_dt_node(u32 parent_phandle, u32 drc_index)
+static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
 {
 	struct device_node *dn;
 	struct device_node *parent_dn;
 	int rc;

-	parent_dn = of_find_node_by_phandle(parent_phandle);
+	parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
 	if (!parent_dn)
 		return -ENOENT;

@@ -237,7 +239,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
 int pseries_devicetree_update(s32 scope)
 {
 	char *rtas_buf;
-	u32 *data;
+	__be32 *data;
 	int update_nodes_token;
 	int rc;

@@ -254,17 +256,17 @@ int pseries_devicetree_update(s32 scope)
 		if (rc && rc != 1)
 			break;

-		data = (u32 *)rtas_buf + 4;
-		while (*data & NODE_ACTION_MASK) {
+		data = (__be32 *)rtas_buf + 4;
+		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
 			int i;
-			u32 action = *data & NODE_ACTION_MASK;
-			int node_count = *data & NODE_COUNT_MASK;
+			u32 action = be32_to_cpu(*data) & NODE_ACTION_MASK;
+			u32 node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;

 			data++;

 			for (i = 0; i < node_count; i++) {
-				u32 phandle = *data++;
-				u32 drc_index;
+				__be32 phandle = *data++;
+				__be32 drc_index;

 				switch (action) {
 				case DELETE_DT_NODE:

@@ -211,7 +211,7 @@ do { \

 extern unsigned long mmap_rnd_mask;

-#define STACK_RND_MASK	(mmap_rnd_mask)
+#define STACK_RND_MASK	(test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)

 #define ARCH_DLINFO \
 do { \

@@ -57,6 +57,44 @@

 unsigned long ftrace_plt;

+static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
+{
+#ifdef CC_USING_HOTPATCH
+	/* brcl 0,0 */
+	insn->opc = 0xc004;
+	insn->disp = 0;
+#else
+	/* stg r14,8(r15) */
+	insn->opc = 0xe3e0;
+	insn->disp = 0xf0080024;
+#endif
+}
+
+static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+	if (insn->opc == BREAKPOINT_INSTRUCTION)
+		return 1;
+#endif
+	return 0;
+}
+
+static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+	insn->opc = BREAKPOINT_INSTRUCTION;
+	insn->disp = KPROBE_ON_FTRACE_NOP;
+#endif
+}
+
+static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_KPROBES
+	insn->opc = BREAKPOINT_INSTRUCTION;
+	insn->disp = KPROBE_ON_FTRACE_CALL;
+#endif
+}
+
 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 		       unsigned long addr)
 {
@@ -72,16 +110,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		return -EFAULT;
 	if (addr == MCOUNT_ADDR) {
 		/* Initial code replacement */
-#ifdef CC_USING_HOTPATCH
-		/* We expect to see brcl 0,0 */
-		ftrace_generate_nop_insn(&orig);
-#else
-		/* We expect to see stg r14,8(r15) */
-		orig.opc = 0xe3e0;
-		orig.disp = 0xf0080024;
-#endif
+		ftrace_generate_orig_insn(&orig);
 		ftrace_generate_nop_insn(&new);
-	} else if (old.opc == BREAKPOINT_INSTRUCTION) {
+	} else if (is_kprobe_on_ftrace(&old)) {
 		/*
 		 * If we find a breakpoint instruction, a kprobe has been
 		 * placed at the beginning of the function. We write the
@@ -89,9 +120,8 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		 * bytes of the original instruction so that the kprobes
 		 * handler can execute a nop, if it reaches this breakpoint.
 		 */
-		new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-		orig.disp = KPROBE_ON_FTRACE_CALL;
-		new.disp = KPROBE_ON_FTRACE_NOP;
+		ftrace_generate_kprobe_call_insn(&orig);
+		ftrace_generate_kprobe_nop_insn(&new);
 	} else {
 		/* Replace ftrace call with a nop. */
 		ftrace_generate_call_insn(&orig, rec->ip);
@@ -111,7 +141,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)

 	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
 		return -EFAULT;
-	if (old.opc == BREAKPOINT_INSTRUCTION) {
+	if (is_kprobe_on_ftrace(&old)) {
 		/*
 		 * If we find a breakpoint instruction, a kprobe has been
 		 * placed at the beginning of the function. We write the
@@ -119,9 +149,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 		 * bytes of the original instruction so that the kprobes
 		 * handler can execute a brasl if it reaches this breakpoint.
 		 */
-		new.opc = orig.opc = BREAKPOINT_INSTRUCTION;
-		orig.disp = KPROBE_ON_FTRACE_NOP;
-		new.disp = KPROBE_ON_FTRACE_CALL;
+		ftrace_generate_kprobe_nop_insn(&orig);
+		ftrace_generate_kprobe_call_insn(&new);
 	} else {
 		/* Replace nop with an ftrace call. */
 		ftrace_generate_nop_insn(&orig);

@@ -1415,7 +1415,7 @@ CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);

 static struct attribute *cpumsf_pmu_events_attr[] = {
 	CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
-	CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG),
+	NULL,
 	NULL,
 };

@@ -1606,8 +1606,11 @@ static int __init init_cpum_sampling_pmu(void)
 		return -EINVAL;
 	}

-	if (si.ad)
+	if (si.ad) {
 		sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
+		cpumsf_pmu_events_attr[1] =
+			CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
+	}

 	sfdbg = debug_register(KMSG_COMPONENT, 2, 1, 80);
 	if (!sfdbg)

@@ -177,6 +177,17 @@ restart_entry:
 	lhi	%r1,1
 	sigp	%r1,%r0,SIGP_SET_ARCHITECTURE
 	sam64
+#ifdef CONFIG_SMP
+	larl	%r1,smp_cpu_mt_shift
+	icm	%r1,15,0(%r1)
+	jz	smt_done
+	llgfr	%r1,%r1
+smt_loop:
+	sigp	%r1,%r0,SIGP_SET_MULTI_THREADING
+	brc	8,smt_done			/* accepted */
+	brc	2,smt_loop			/* busy, try again */
+smt_done:
+#endif
 	larl	%r1,.Lnew_pgm_check_psw
 	lpswe	0(%r1)
 pgm_check_entry:

@@ -364,12 +364,21 @@ system_call_fastpath:
 * Has incomplete stack frame and undefined top of stack.
 */
 ret_from_sys_call:
-	testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-	jnz int_ret_from_sys_call_fixup	/* Go the the slow path */
-
 	LOCKDEP_SYS_EXIT
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
+
+	/*
+	 * We must check ti flags with interrupts (or at least preemption)
+	 * off because we must *never* return to userspace without
+	 * processing exit work that is enqueued if we're preempted here.
+	 * In particular, returning to userspace with any of the one-shot
+	 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+	 * very bad.
+	 */
+	testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	jnz int_ret_from_sys_call_fixup	/* Go the the slow path */
+
 	CFI_REMEMBER_STATE
 	/*
 	 * sysretq will re-enable interrupts:
@@ -386,7 +395,7 @@ ret_from_sys_call:

 int_ret_from_sys_call_fixup:
 	FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
-	jmp int_ret_from_sys_call
+	jmp int_ret_from_sys_call_irqs_off

 /* Do syscall tracing */
 tracesys:
@@ -432,6 +441,7 @@ tracesys_phase2:
 GLOBAL(int_ret_from_sys_call)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
+int_ret_from_sys_call_irqs_off:
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi: mask to check */
 GLOBAL(int_with_check)

@@ -422,6 +422,7 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 			struct kvm_ioapic *ioapic, int vector, int trigger_mode)
 {
 	int i;
+	struct kvm_lapic *apic = vcpu->arch.apic;

 	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
 		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -443,7 +444,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
 		spin_lock(&ioapic->lock);

-		if (trigger_mode != IOAPIC_LEVEL_TRIG)
+		if (trigger_mode != IOAPIC_LEVEL_TRIG ||
+		    kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
 			continue;

 		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);

@@ -833,8 +833,7 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)

 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
 {
-	if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
-	    kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
+	if (kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
 		int trigger_mode;
 		if (apic_test_vector(vector, apic->regs + APIC_TMR))
 			trigger_mode = IOAPIC_LEVEL_TRIG;

@@ -2479,8 +2479,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	if (enable_ept) {
 		/* nested EPT: emulate EPT also to L1 */
 		vmx->nested.nested_vmx_secondary_ctls_high |=
-			SECONDARY_EXEC_ENABLE_EPT |
-			SECONDARY_EXEC_UNRESTRICTED_GUEST;
+			SECONDARY_EXEC_ENABLE_EPT;
 		vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
 			 VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
 			 VMX_EPT_INVEPT_BIT;

@@ -2494,6 +2493,10 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	} else
 		vmx->nested.nested_vmx_ept_caps = 0;
 
+	if (enable_unrestricted_guest)
+		vmx->nested.nested_vmx_secondary_ctls_high |=
+			SECONDARY_EXEC_UNRESTRICTED_GUEST;
+
 	/* miscellaneous data */
 	rdmsr(MSR_IA32_VMX_MISC,
 		vmx->nested.nested_vmx_misc_low,
@@ -592,7 +592,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
 		struct bio_vec *bprev;
 
-		bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
 		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
 			return false;
 	}
@@ -278,9 +278,11 @@ static int bt_get(struct blk_mq_alloc_data *data,
 		/*
 		 * We're out of tags on this hardware queue, kick any
 		 * pending IO submits before going to sleep waiting for
-		 * some to complete.
+		 * some to complete. Note that hctx can be NULL here for
+		 * reserved tag allocation.
 		 */
-		blk_mq_run_hw_queue(hctx, false);
+		if (hctx)
+			blk_mq_run_hw_queue(hctx, false);
 
 		/*
 		 * Retry tag allocation after running the hardware queue,
@@ -1938,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	 */
 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
 			    PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-		goto err_map;
+		goto err_mq_usage;
 
 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
 	blk_queue_rq_timeout(q, 30000);

@@ -1981,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
 	if (blk_mq_init_hw_queues(q, set))
-		goto err_hw;
+		goto err_mq_usage;
 
 	mutex_lock(&all_q_mutex);
 	list_add_tail(&q->all_q_node, &all_q_list);

@@ -1993,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	return q;
 
-err_hw:
+err_mq_usage:
 	blk_cleanup_queue(q);
 err_hctxs:
 	kfree(map);
@@ -4204,9 +4204,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
 
 	/* devices that don't properly handle queued TRIM commands */
-	{ "Micron_M[56]*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Micron_M5[15]0*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Samsung SSD 850 PRO*",	NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+						ATA_HORKAGE_ZERO_AFTER_TRIM, },
-	{ "Crucial_CT*SSD*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM, },
 
 	/*
 	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT

@@ -4226,6 +4235,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	 */
 	{ "INTEL*SSDSC2MH*",		NULL,	0, },
 
+	{ "Micron*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
+	{ "Crucial*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "INTEL*SSD*", 		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "SSD*INTEL*",			NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Samsung*SSD*",		NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM, },

@@ -4737,7 +4748,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
 		return NULL;
 
 	/* libsas case */
-	if (!ap->scsi_host) {
+	if (ap->flags & ATA_FLAG_SAS_HOST) {
 		tag = ata_sas_allocate_tag(ap);
 		if (tag < 0)
 			return NULL;

@@ -4776,7 +4787,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
 	tag = qc->tag;
 	if (likely(ata_tag_valid(tag))) {
 		qc->tag = ATA_TAG_POISON;
-		if (!ap->scsi_host)
+		if (ap->flags & ATA_FLAG_SAS_HOST)
 			ata_sas_free_tag(tag, ap);
 	}
 }
@@ -243,4 +243,12 @@ extern struct regcache_ops regcache_rbtree_ops;
 extern struct regcache_ops regcache_lzo_ops;
 extern struct regcache_ops regcache_flat_ops;
 
+static inline const char *regmap_name(const struct regmap *map)
+{
+	if (map->dev)
+		return dev_name(map->dev);
+
+	return map->name;
+}
+
 #endif
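The regmap_name() helper exists because the trace rework below stops assuming every regmap has a struct device behind it: a syscon-style MMIO map may carry only a name. A minimal sketch of a consumer follows; the helper comes from the hunk above, but the debug function and its name are illustrative assumptions, not code from this series.

	/* sketch only: regmap_debug_write() is hypothetical */
	static void regmap_debug_write(struct regmap *map,
				       unsigned int reg, unsigned int val)
	{
		pr_debug("%s: %#x <= %#x\n", regmap_name(map), reg, val);
	}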
@@ -218,7 +218,7 @@ int regcache_read(struct regmap *map,
 		ret = map->cache_ops->read(map, reg, value);
 
 		if (ret == 0)
-			trace_regmap_reg_read_cache(map->dev, reg, *value);
+			trace_regmap_reg_read_cache(map, reg, *value);
 
 		return ret;
 	}

@@ -311,7 +311,7 @@ int regcache_sync(struct regmap *map)
 	dev_dbg(map->dev, "Syncing %s cache\n",
 		map->cache_ops->name);
 	name = map->cache_ops->name;
-	trace_regcache_sync(map->dev, name, "start");
+	trace_regcache_sync(map, name, "start");
 
 	if (!map->cache_dirty)
 		goto out;

@@ -346,7 +346,7 @@ int regcache_sync(struct regmap *map)
 
 	regmap_async_complete(map);
 
-	trace_regcache_sync(map->dev, name, "stop");
+	trace_regcache_sync(map, name, "stop");
 
 	return ret;
 }

@@ -381,7 +381,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 	name = map->cache_ops->name;
 	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
 
-	trace_regcache_sync(map->dev, name, "start region");
+	trace_regcache_sync(map, name, "start region");
 
 	if (!map->cache_dirty)
 		goto out;

@@ -401,7 +401,7 @@ int regcache_sync_region(struct regmap *map, unsigned int min,
 
 	regmap_async_complete(map);
 
-	trace_regcache_sync(map->dev, name, "stop region");
+	trace_regcache_sync(map, name, "stop region");
 
 	return ret;
 }

@@ -428,7 +428,7 @@ int regcache_drop_region(struct regmap *map, unsigned int min,
 
 	map->lock(map->lock_arg);
 
-	trace_regcache_drop_region(map->dev, min, max);
+	trace_regcache_drop_region(map, min, max);
 
 	ret = map->cache_ops->drop(map, min, max);
 

@@ -455,7 +455,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
 	map->lock(map->lock_arg);
 	WARN_ON(map->cache_bypass && enable);
 	map->cache_only = enable;
-	trace_regmap_cache_only(map->dev, enable);
+	trace_regmap_cache_only(map, enable);
 	map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_only);

@@ -493,7 +493,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
 	map->lock(map->lock_arg);
 	WARN_ON(map->cache_only && enable);
 	map->cache_bypass = enable;
-	trace_regmap_cache_bypass(map->dev, enable);
+	trace_regmap_cache_bypass(map, enable);
 	map->unlock(map->lock_arg);
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
@@ -1281,7 +1281,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 	if (map->async && map->bus->async_write) {
 		struct regmap_async *async;
 
-		trace_regmap_async_write_start(map->dev, reg, val_len);
+		trace_regmap_async_write_start(map, reg, val_len);
 
 		spin_lock_irqsave(&map->async_lock, flags);
 		async = list_first_entry_or_null(&map->async_free,

@@ -1339,8 +1339,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		return ret;
 	}
 
-	trace_regmap_hw_write_start(map->dev, reg,
-				    val_len / map->format.val_bytes);
+	trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
 
 	/* If we're doing a single register write we can probably just
 	 * send the work_buf directly, otherwise try to do a gather

@@ -1372,8 +1371,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 		kfree(buf);
 	}
 
-	trace_regmap_hw_write_done(map->dev, reg,
-				   val_len / map->format.val_bytes);
+	trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
 
 	return ret;
 }

@@ -1407,12 +1405,12 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 
 	map->format.format_write(map, reg, val);
 
-	trace_regmap_hw_write_start(map->dev, reg, 1);
+	trace_regmap_hw_write_start(map, reg, 1);
 
 	ret = map->bus->write(map->bus_context, map->work_buf,
 			      map->format.buf_size);
 
-	trace_regmap_hw_write_done(map->dev, reg, 1);
+	trace_regmap_hw_write_done(map, reg, 1);
 
 	return ret;
 }

@@ -1470,7 +1468,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
 		dev_info(map->dev, "%x <= %x\n", reg, val);
 #endif
 
-	trace_regmap_reg_write(map->dev, reg, val);
+	trace_regmap_reg_write(map, reg, val);
 
 	return map->reg_write(context, reg, val);
 }

@@ -1773,7 +1771,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 	for (i = 0; i < num_regs; i++) {
 		int reg = regs[i].reg;
 		int val = regs[i].def;
-		trace_regmap_hw_write_start(map->dev, reg, 1);
+		trace_regmap_hw_write_start(map, reg, 1);
 		map->format.format_reg(u8, reg, map->reg_shift);
 		u8 += reg_bytes + pad_bytes;
 		map->format.format_val(u8, val, 0);

@@ -1788,7 +1786,7 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 
 	for (i = 0; i < num_regs; i++) {
 		int reg = regs[i].reg;
-		trace_regmap_hw_write_done(map->dev, reg, 1);
+		trace_regmap_hw_write_done(map, reg, 1);
 	}
 	return ret;
 }

@@ -2059,15 +2057,13 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	 */
 	u8[0] |= map->read_flag_mask;
 
-	trace_regmap_hw_read_start(map->dev, reg,
-				   val_len / map->format.val_bytes);
+	trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
 
 	ret = map->bus->read(map->bus_context, map->work_buf,
 			     map->format.reg_bytes + map->format.pad_bytes,
 			     val, val_len);
 
-	trace_regmap_hw_read_done(map->dev, reg,
-				  val_len / map->format.val_bytes);
+	trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
 
 	return ret;
 }

@@ -2123,7 +2119,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
 		dev_info(map->dev, "%x => %x\n", reg, *val);
 #endif
 
-	trace_regmap_reg_read(map->dev, reg, *val);
+	trace_regmap_reg_read(map, reg, *val);
 
 	if (!map->cache_bypass)
 		regcache_write(map, reg, *val);

@@ -2480,7 +2476,7 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
 	struct regmap *map = async->map;
 	bool wake;
 
-	trace_regmap_async_io_complete(map->dev);
+	trace_regmap_async_io_complete(map);
 
 	spin_lock(&map->async_lock);
 	list_move(&async->list, &map->async_free);

@@ -2525,7 +2521,7 @@ int regmap_async_complete(struct regmap *map)
 	if (!map->bus || !map->bus->async_write)
 		return 0;
 
-	trace_regmap_async_complete_start(map->dev);
+	trace_regmap_async_complete_start(map);
 
 	wait_event(map->async_waitq, regmap_async_is_done(map));
 

@@ -2534,7 +2530,7 @@ int regmap_async_complete(struct regmap *map)
 	map->async_ret = 0;
 	spin_unlock_irqrestore(&map->async_lock, flags);
 
-	trace_regmap_async_complete_done(map->dev);
+	trace_regmap_async_complete_done(map);
 
 	return ret;
 }
@@ -803,10 +803,6 @@ static int __init nbd_init(void)
 		return -EINVAL;
 	}
 
-	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
-	if (!nbd_dev)
-		return -ENOMEM;
-
 	part_shift = 0;
 	if (max_part > 0) {
 		part_shift = fls(max_part);

@@ -828,6 +824,10 @@ static int __init nbd_init(void)
 	if (nbds_max > 1UL << (MINORBITS - part_shift))
 		return -EINVAL;
 
+	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
+	if (!nbd_dev)
+		return -ENOMEM;
+
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = alloc_disk(1 << part_shift);
 		if (!disk)
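The nbd hunk pair is a pure reordering: the allocation moves below all of the module-parameter checks, so the early -EINVAL returns can no longer leak the device array. The shape of the fix in isolation, with generic names (max_devs, devs, part_shift are stand-ins for the driver's own parameters; MINORBITS is the real kernel constant):

	static int __init example_init(void)
	{
		/* validate every parameter first; nothing to undo yet */
		if (max_devs > 1UL << (MINORBITS - part_shift))
			return -EINVAL;

		/* allocate only once the last early-return check passed */
		devs = kcalloc(max_devs, sizeof(*devs), GFP_KERNEL);
		if (!devs)
			return -ENOMEM;

		return 0;
	}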
@@ -3003,6 +3003,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
+	get_device(dev->device);
 
 	INIT_LIST_HEAD(&dev->node);
 	INIT_WORK(&dev->probe_work, nvme_async_probe);
 	schedule_work(&dev->probe_work);
 	return 0;
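The single added line matters because the rest of the probe runs asynchronously: the device must be pinned with a reference before the work item is scheduled, and released only when the deferred path is done with it. A hedged sketch of the pairing, with all names hypothetical:

	/* worker paired with the get_device() taken at probe time */
	static void example_async_probe(struct work_struct *work)
	{
		struct example_dev *dev =
			container_of(work, struct example_dev, probe_work);

		example_finish_probe(dev);	/* may sleep */
		put_device(dev->device);	/* balances get_device() */
	}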
@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
 config SH_TIMER_CMT
 	bool "Renesas CMT timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_CMT
 	help
 	  This enables build of a clocksource and clockevent driver for

@@ -201,6 +202,7 @@ config SH_TIMER_CMT
 config SH_TIMER_MTU2
 	bool "Renesas MTU2 timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_MTU2
 	help
 	  This enables build of a clockevent driver for the Multi-Function

@@ -210,6 +212,7 @@ config SH_TIMER_MTU2
 config SH_TIMER_TMU
 	bool "Renesas TMU timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_TMU
 	help
 	  This enables build of a clocksource and clockevent driver for
@@ -17,7 +17,6 @@
 #include <linux/irq.h>
 #include <linux/irqreturn.h>
 #include <linux/reset.h>
-#include <linux/sched_clock.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>

@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
 	.dev_id = &sun5i_clockevent,
 };
 
-static u64 sun5i_timer_sched_read(void)
-{
-	return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
-}
-
 static void __init sun5i_timer_init(struct device_node *node)
 {
 	struct reset_control *rstc;

@@ -172,7 +166,6 @@ static void __init sun5i_timer_init(struct device_node *node)
 	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
 	       timer_base + TIMER_CTL_REG(1));
 
-	sched_clock_register(sun5i_timer_sched_read, 32, rate);
 	clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
 			      rate, 340, 32, clocksource_mmio_readl_down);
@@ -475,6 +475,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
 	 * c->desc is NULL and exit.)
 	 */
 	if (c->desc) {
+		bcm2835_dma_desc_free(&c->desc->vd);
 		c->desc = NULL;
 		bcm2835_dma_abort(c->chan_base);
 
@@ -511,6 +511,9 @@ static void jz4740_dma_desc_free(struct virt_dma_desc *vdesc)
 	kfree(container_of(vdesc, struct jz4740_dma_desc, vdesc));
 }
 
+#define JZ4740_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
 static int jz4740_dma_probe(struct platform_device *pdev)
 {
 	struct jz4740_dmaengine_chan *chan;

@@ -548,6 +551,10 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 	dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
 	dd->device_config = jz4740_dma_slave_config;
 	dd->device_terminate_all = jz4740_dma_terminate_all;
+	dd->src_addr_widths = JZ4740_DMA_BUSWIDTHS;
+	dd->dst_addr_widths = JZ4740_DMA_BUSWIDTHS;
+	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 	dd->dev = &pdev->dev;
 	INIT_LIST_HEAD(&dd->channels);
@@ -260,6 +260,13 @@ static int edma_terminate_all(struct dma_chan *chan)
 	 */
 	if (echan->edesc) {
 		int cyclic = echan->edesc->cyclic;
+
+		/*
+		 * free the running request descriptor
+		 * since it is not in any of the vdesc lists
+		 */
+		edma_desc_free(&echan->edesc->vdesc);
+
 		echan->edesc = NULL;
 		edma_stop(echan->ch_num);
 		/* Move the cyclic channel back to default queue */
@@ -193,8 +193,10 @@ static int moxart_terminate_all(struct dma_chan *chan)
 
 	spin_lock_irqsave(&ch->vc.lock, flags);
 
-	if (ch->desc)
+	if (ch->desc) {
+		moxart_dma_desc_free(&ch->desc->vd);
 		ch->desc = NULL;
+	}
 
 	ctrl = readl(ch->base + REG_OFF_CTRL);
 	ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
@@ -981,6 +981,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
 	 * c->desc is NULL and exit.)
 	 */
 	if (c->desc) {
+		omap_dma_desc_free(&c->desc->vd);
 		c->desc = NULL;
 		/* Avoid stopping the dma twice */
 		if (!c->paused)
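The bcm2835, edma, moxart and omap hunks above are the same leak fix four times over: the descriptor of the transfer that is currently running lives outside the virt-dma descriptor lists, so terminate_all() must free it explicitly before clearing the pointer. The shared shape, sketched with driver-specific names abstracted away (example_desc_free and example_hw_stop stand in for each driver's own callbacks):

	static int example_terminate_all(struct dma_chan *c)
	{
		struct example_chan *chan = to_example_chan(c);
		unsigned long flags;

		spin_lock_irqsave(&chan->vc.lock, flags);
		if (chan->desc) {
			/* this free is the line each hunk adds */
			example_desc_free(&chan->desc->vd);
			chan->desc = NULL;
		}
		example_hw_stop(chan);
		spin_unlock_irqrestore(&chan->vc.lock, flags);
		return 0;
	}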
@@ -334,7 +334,7 @@ static struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
 	.xlate	= irq_domain_xlate_twocell,
 };
 
-static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+static struct of_device_id mpc8xxx_gpio_ids[] = {
 	{ .compatible = "fsl,mpc8349-gpio", },
 	{ .compatible = "fsl,mpc8572-gpio", },
 	{ .compatible = "fsl,mpc8610-gpio", },
@@ -219,7 +219,7 @@ static int syscon_gpio_probe(struct platform_device *pdev)
 		ret = of_property_read_u32_index(np, "gpio,syscon-dev", 2,
 						 &priv->dir_reg_offset);
 		if (ret)
-			dev_err(dev, "can't read the dir register offset!\n");
+			dev_dbg(dev, "can't read the dir register offset!\n");
 
 		priv->dir_reg_offset <<= 3;
 	}
@@ -201,6 +201,10 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 	if (!handler)
 		return AE_BAD_PARAMETER;
 
+	pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+	if (pin < 0)
+		return AE_BAD_PARAMETER;
+
 	desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event");
 	if (IS_ERR(desc)) {
 		dev_err(chip->dev, "Failed to request GPIO\n");

@@ -551,6 +555,12 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
 		struct gpio_desc *desc;
 		bool found;
 
+		pin = acpi_gpiochip_pin_to_gpio_offset(chip, pin);
+		if (pin < 0) {
+			status = AE_BAD_PARAMETER;
+			goto out;
+		}
+
 		mutex_lock(&achip->conn_lock);
 
 		found = false;
@@ -525,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
 }
 EXPORT_SYMBOL(drm_framebuffer_reference);
 
-static void drm_framebuffer_free_bug(struct kref *kref)
-{
-	BUG();
-}
-
-static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
-{
-	DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
-	kref_put(&fb->refcount, drm_framebuffer_free_bug);
-}
-
 /**
  * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
  * @fb: fb to unregister

@@ -1320,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane)
 		return;
 	}
 	/* disconnect the plane from the fb and crtc: */
-	__drm_framebuffer_unreference(plane->old_fb);
+	drm_framebuffer_unreference(plane->old_fb);
 	plane->old_fb = NULL;
 	plane->fb = NULL;
 	plane->crtc = NULL;
@@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
-	/* Move any buffers on the active list that are no longer referenced
-	 * by the ringbuffer to the flushing/inactive lists as appropriate,
-	 * before we free the context associated with the requests.
+	/* Retire requests first as we use it above for the early return.
+	 * If we retire requests last, we may use a later seqno and so clear
+	 * the requests lists without clearing the active list, leading to
+	 * confusion.
 	 */
-	while (!list_empty(&ring->active_list)) {
-		struct drm_i915_gem_object *obj;
-
-		obj = list_first_entry(&ring->active_list,
-				       struct drm_i915_gem_object,
-				       ring_list);
-
-		if (!i915_gem_request_completed(obj->last_read_req, true))
-			break;
-
-		i915_gem_object_move_to_inactive(obj);
-	}
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 		struct intel_ringbuffer *ringbuf;

@@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 		i915_gem_free_request(request);
 	}
 
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate,
+	 * before we free the context associated with the requests.
+	 */
+	while (!list_empty(&ring->active_list)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&ring->active_list,
+				       struct drm_i915_gem_object,
+				       ring_list);
+
+		if (!i915_gem_request_completed(obj->last_read_req, true))
+			break;
+
+		i915_gem_object_move_to_inactive(obj);
+	}
+
 	if (unlikely(ring->trace_irq_req &&
 		     i915_gem_request_completed(ring->trace_irq_req, true))) {
 		ring->irq_put(ring);
@@ -2438,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
 	if (!intel_crtc->base.primary->fb)
 		return;
 
-	if (intel_alloc_plane_obj(intel_crtc, plane_config))
+	if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
+		struct drm_plane *primary = intel_crtc->base.primary;
+
+		primary->state->crtc = &intel_crtc->base;
+		primary->crtc = &intel_crtc->base;
+		update_state_fb(primary);
+
 		return;
+	}
 
 	kfree(intel_crtc->base.primary->fb);
 	intel_crtc->base.primary->fb = NULL;

@@ -2462,11 +2469,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc,
 			continue;
 
 		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
+			struct drm_plane *primary = intel_crtc->base.primary;
+
 			if (obj->tiling_mode != I915_TILING_NONE)
 				dev_priv->preserve_bios_swizzle = true;
 
 			drm_framebuffer_reference(c->primary->fb);
-			intel_crtc->base.primary->fb = c->primary->fb;
+			primary->fb = c->primary->fb;
+			primary->state->crtc = &intel_crtc->base;
+			primary->crtc = &intel_crtc->base;
 			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
 			break;
 		}

@@ -6663,7 +6674,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
 		      plane_config->size);
 
 	crtc->base.primary->fb = fb;
-	update_state_fb(crtc->base.primary);
 }
 
 static void chv_crtc_clock_get(struct intel_crtc *crtc,

@@ -7704,7 +7714,6 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
 		      plane_config->size);
 
 	crtc->base.primary->fb = fb;
-	update_state_fb(crtc->base.primary);
 	return;
 
 error:

@@ -7798,7 +7807,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 		      plane_config->size);
 
 	crtc->base.primary->fb = fb;
-	update_state_fb(crtc->base.primary);
 }
 
 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
@@ -1288,10 +1288,13 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
 		return 0;
 
 	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
-	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS)
+	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
+			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
 		ret = arm_smmu_iova_to_phys_hard(domain, iova);
-	else
+	} else {
 		ret = ops->iova_to_phys(ops, iova);
+	}
 
 	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
 
 	return ret;

@@ -1556,7 +1559,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 		return -ENODEV;
 	}
 
-	if (smmu->version == 1 || (!(id & ID0_ATOSNS) && (id & ID0_S1TS))) {
+	if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
 		dev_notice(smmu->dev, "\taddress translation ops\n");
 	}
@@ -1742,9 +1742,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
 static void domain_exit(struct dmar_domain *domain)
 {
-	struct dmar_drhd_unit *drhd;
-	struct intel_iommu *iommu;
 	struct page *freelist = NULL;
+	int i;
 
 	/* Domain 0 is reserved, so dont process it */
 	if (!domain)

@@ -1764,8 +1763,8 @@ static void domain_exit(struct dmar_domain *domain)
 
 	/* clear attached or cached domains */
 	rcu_read_lock();
-	for_each_active_iommu(iommu, drhd)
-		iommu_detach_domain(domain, iommu);
+	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
+		iommu_detach_domain(domain, g_iommus[i]);
 	rcu_read_unlock();
 
 	dma_free_pagelist(freelist);
@@ -851,6 +851,7 @@ static int ipmmu_remove(struct platform_device *pdev)
 
 static const struct of_device_id ipmmu_of_ids[] = {
 	{ .compatible = "renesas,ipmmu-vmsa", },
+	{ }
 };
 
 static struct platform_driver ipmmu_driver = {
@@ -1,6 +1,6 @@
 config LGUEST
 	tristate "Linux hypervisor example code"
-	depends on X86_32 && EVENTFD && TTY
+	depends on X86_32 && EVENTFD && TTY && PCI_DIRECT
 	select HVC_DRIVER
 	---help---
 	  This is a very simple module which allows you to run
@@ -433,7 +433,6 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 
 	dm_get(md);
 	atomic_inc(&md->open_count);
-
 out:
 	spin_unlock(&_minor_lock);
 

@@ -442,16 +441,20 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode)
 
 static void dm_blk_close(struct gendisk *disk, fmode_t mode)
 {
-	struct mapped_device *md = disk->private_data;
+	struct mapped_device *md;
 
 	spin_lock(&_minor_lock);
 
+	md = disk->private_data;
+	if (WARN_ON(!md))
+		goto out;
+
 	if (atomic_dec_and_test(&md->open_count) &&
 	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
 		queue_work(deferred_remove_workqueue, &deferred_remove_work);
 
 	dm_put(md);
-
+out:
 	spin_unlock(&_minor_lock);
 }
 

@@ -2241,7 +2244,6 @@ static void free_dev(struct mapped_device *md)
 	int minor = MINOR(disk_devt(md->disk));
 
 	unlock_fs(md);
-	bdput(md->bdev);
 	destroy_workqueue(md->wq);
 
 	if (md->kworker_task)

@@ -2252,19 +2254,22 @@ static void free_dev(struct mapped_device *md)
 		mempool_destroy(md->rq_pool);
 	if (md->bs)
 		bioset_free(md->bs);
-	blk_integrity_unregister(md->disk);
-	del_gendisk(md->disk);
 
 	cleanup_srcu_struct(&md->io_barrier);
 	free_table_devices(&md->table_devices);
-	free_minor(minor);
+	dm_stats_cleanup(&md->stats);
 
 	spin_lock(&_minor_lock);
 	md->disk->private_data = NULL;
 	spin_unlock(&_minor_lock);
-
+	if (blk_get_integrity(md->disk))
+		blk_integrity_unregister(md->disk);
+	del_gendisk(md->disk);
 	put_disk(md->disk);
 	blk_cleanup_queue(md->queue);
-	dm_stats_cleanup(&md->stats);
+	bdput(md->bdev);
+	free_minor(minor);
 
 	module_put(THIS_MODULE);
 	kfree(md);
 }

@@ -2642,8 +2647,9 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
 
 	might_sleep();
 
-	spin_lock(&_minor_lock);
 	map = dm_get_live_table(md, &srcu_idx);
+
+	spin_lock(&_minor_lock);
 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
 	set_bit(DMF_FREEING, &md->flags);
 	spin_unlock(&_minor_lock);
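The dm_blk_close() rework closes a race with free_dev(): teardown now clears disk->private_data under _minor_lock, so the close path must re-read that pointer under the same lock and tolerate finding it NULL instead of dereferencing a stale value. Reduced to its essentials as a sketch, with generic names standing in for the dm internals:

	static void example_blk_close(struct gendisk *disk, fmode_t mode)
	{
		struct example_dev *d;

		spin_lock(&example_minor_lock);
		d = disk->private_data;	/* NULL once teardown has run */
		if (WARN_ON(!d))
			goto out;
		example_put(d);
	out:
		spin_unlock(&example_minor_lock);
	}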
@@ -739,7 +739,7 @@ static int __init kempld_init(void)
 		for (id = kempld_dmi_table;
 		     id->matches[0].slot != DMI_NONE; id++)
 			if (strstr(id->ident, force_device_id))
-				if (id->callback && id->callback(id))
+				if (id->callback && !id->callback(id))
 					break;
 		if (id->matches[0].slot == DMI_NONE)
 			return -ENODEV;
@@ -196,18 +196,27 @@ EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
 int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
 {
 	u16 value;
+	u8 *buf;
+	int ret;
 
 	if (!data)
 		return -EINVAL;
-	*data = 0;
+
+	buf = kzalloc(sizeof(u8), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
 	addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
 	value = swab16(addr);
 
-	return usb_control_msg(ucr->pusb_dev,
+	ret = usb_control_msg(ucr->pusb_dev,
 			usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
 			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-			value, 0, data, 1, 100);
+			value, 0, buf, 1, 100);
+	*data = *buf;
+
+	kfree(buf);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);

@@ -288,18 +297,27 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
 int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
 {
 	int ret;
+	u16 *buf;
 
 	if (!status)
 		return -EINVAL;
 
-	if (polling_pipe == 0)
+	if (polling_pipe == 0) {
+		buf = kzalloc(sizeof(u16), GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+
 		ret = usb_control_msg(ucr->pusb_dev,
 				usb_rcvctrlpipe(ucr->pusb_dev, 0),
 				RTSX_USB_REQ_POLL,
 				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-				0, 0, status, 2, 100);
-	else
+				0, 0, buf, 2, 100);
+		*status = *buf;
+
+		kfree(buf);
+	} else {
 		ret = rtsx_usb_get_status_with_bulk(ucr, status);
+	}
 
 	/* usb_control_msg may return positive when success */
 	if (ret < 0)
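Both rtsx hunks fix the same class of bug: the buffer handed to usb_control_msg() is used for DMA by the host controller, so it must come from the heap, never from the caller's stack or an arbitrary pointer of unknown provenance. The pattern in isolation, as a sketch (udev, request, value and data are assumed to be in scope; only the bounce-buffer idiom itself is the point):

	u8 *buf;
	int ret;

	buf = kzalloc(1, GFP_KERNEL);	/* heap memory is DMA-able */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, 0, buf, 1, 100);
	*data = *buf;			/* copy out before freeing */
	kfree(buf);
	return ret;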
@@ -3881,7 +3881,8 @@ static inline int bond_slave_override(struct bonding *bond,
 	/* Find out if any slaves have the same mapping as this skb. */
 	bond_for_each_slave_rcu(bond, slave, iter) {
 		if (slave->queue_id == skb->queue_mapping) {
-			if (bond_slave_can_tx(slave)) {
+			if (bond_slave_is_up(slave) &&
+			    slave->link == BOND_LINK_UP) {
 				bond_dev_queue_xmit(bond, skb, slave->dev);
 				return 0;
 			}
@@ -592,13 +592,12 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
 		rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
 			   CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
 		new_state = max(tx_state, rx_state);
-	} else if (unlikely(flt == FLEXCAN_ESR_FLT_CONF_PASSIVE)) {
+	} else {
 		__flexcan_get_berr_counter(dev, &bec);
-		new_state = CAN_STATE_ERROR_PASSIVE;
+		new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
+			    CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
 		rx_state = bec.rxerr >= bec.txerr ? new_state : 0;
 		tx_state = bec.rxerr <= bec.txerr ? new_state : 0;
-	} else {
-		new_state = CAN_STATE_BUS_OFF;
 	}
 
 	/* state hasn't changed */

@@ -1158,12 +1157,19 @@ static int flexcan_probe(struct platform_device *pdev)
 	const struct flexcan_devtype_data *devtype_data;
 	struct net_device *dev;
 	struct flexcan_priv *priv;
+	struct regulator *reg_xceiver;
 	struct resource *mem;
 	struct clk *clk_ipg = NULL, *clk_per = NULL;
 	void __iomem *base;
 	int err, irq;
 	u32 clock_freq = 0;
 
+	reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+	if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+	else if (IS_ERR(reg_xceiver))
+		reg_xceiver = NULL;
+
 	if (pdev->dev.of_node)
 		of_property_read_u32(pdev->dev.of_node,
 						"clock-frequency", &clock_freq);

@@ -1224,9 +1230,7 @@ static int flexcan_probe(struct platform_device *pdev)
 	priv->pdata = dev_get_platdata(&pdev->dev);
 	priv->devtype_data = devtype_data;
 
-	priv->reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
-	if (IS_ERR(priv->reg_xceiver))
-		priv->reg_xceiver = NULL;
+	priv->reg_xceiver = reg_xceiver;
 
 	netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
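Moving devm_regulator_get() to the very top of flexcan_probe() is about probe deferral: -EPROBE_DEFER has to be detected before any other resources are claimed, while a genuinely absent (optional) regulator is downgraded to NULL and probing continues. The idiom on its own, as a sketch (the "xceiver" supply name is the driver's; the surrounding context is assumed):

	reg = devm_regulator_get(&pdev->dev, "xceiver");
	if (PTR_ERR(reg) == -EPROBE_DEFER)
		return -EPROBE_DEFER;	/* supplier not bound yet: retry */
	else if (IS_ERR(reg))
		reg = NULL;		/* optional supply: run without it */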
@@ -901,6 +901,8 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 	}
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
 	init_usb_anchor(&dev->rx_submitted);
 
 	atomic_set(&dev->active_channels, 0);
@@ -25,7 +25,6 @@
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 
-#define MAX_TX_URBS			16
 #define MAX_RX_URBS			4
 #define START_TIMEOUT			1000 /* msecs */
 #define STOP_TIMEOUT			1000 /* msecs */

@@ -443,6 +442,7 @@ struct kvaser_usb_error_summary {
 	};
 };
 
+/* Context for an outstanding, not yet ACKed, transmission */
 struct kvaser_usb_tx_urb_context {
 	struct kvaser_usb_net_priv *priv;
 	u32 echo_index;

@@ -456,8 +456,13 @@ struct kvaser_usb {
 	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
 	struct usb_anchor rx_submitted;
 
+	/* @max_tx_urbs: Firmware-reported maximum number of oustanding,
+	 * not yet ACKed, transmissions on this device. This value is
+	 * also used as a sentinel for marking free tx contexts.
+	 */
 	u32 fw_version;
 	unsigned int nchannels;
+	unsigned int max_tx_urbs;
 	enum kvaser_usb_family family;
 
 	bool rxinitdone;

@@ -467,19 +472,18 @@ struct kvaser_usb {
 
 struct kvaser_usb_net_priv {
 	struct can_priv can;
-
-	spinlock_t tx_contexts_lock;
-	int active_tx_contexts;
-	struct kvaser_usb_tx_urb_context tx_contexts[MAX_TX_URBS];
-
-	struct usb_anchor tx_submitted;
-	struct completion start_comp, stop_comp;
+	struct can_berr_counter bec;
 
 	struct kvaser_usb *dev;
 	struct net_device *netdev;
 	int channel;
 
-	struct can_berr_counter bec;
+	struct completion start_comp, stop_comp;
+	struct usb_anchor tx_submitted;
+
+	spinlock_t tx_contexts_lock;
+	int active_tx_contexts;
+	struct kvaser_usb_tx_urb_context tx_contexts[];
 };
 
 static const struct usb_device_id kvaser_usb_table[] = {

@@ -592,8 +596,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
 			 * for further details.
 			 */
 			if (tmp->len == 0) {
-				pos = round_up(pos,
-					       dev->bulk_in->wMaxPacketSize);
+				pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+								wMaxPacketSize));
 				continue;
 			}

@@ -657,9 +661,13 @@ static int kvaser_usb_get_software_info(struct kvaser_usb *dev)
 	switch (dev->family) {
 	case KVASER_LEAF:
 		dev->fw_version = le32_to_cpu(msg.u.leaf.softinfo.fw_version);
+		dev->max_tx_urbs =
+			le16_to_cpu(msg.u.leaf.softinfo.max_outstanding_tx);
 		break;
 	case KVASER_USBCAN:
 		dev->fw_version = le32_to_cpu(msg.u.usbcan.softinfo.fw_version);
+		dev->max_tx_urbs =
+			le16_to_cpu(msg.u.usbcan.softinfo.max_outstanding_tx);
 		break;
 	}

@@ -715,7 +723,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
 
 	stats = &priv->netdev->stats;
 
-	context = &priv->tx_contexts[tid % MAX_TX_URBS];
+	context = &priv->tx_contexts[tid % dev->max_tx_urbs];
 
 	/* Sometimes the state change doesn't come after a bus-off event */
 	if (priv->can.restart_ms &&

@@ -744,7 +752,7 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
 	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
 	can_get_echo_skb(priv->netdev, context->echo_index);
-	context->echo_index = MAX_TX_URBS;
+	context->echo_index = dev->max_tx_urbs;
 	--priv->active_tx_contexts;
 	netif_wake_queue(priv->netdev);

@@ -1329,7 +1337,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
 		 * number of events in case of a heavy rx load on the bus.
 		 */
 		if (msg->len == 0) {
-			pos = round_up(pos, dev->bulk_in->wMaxPacketSize);
+			pos = round_up(pos, le16_to_cpu(dev->bulk_in->
+							wMaxPacketSize));
 			continue;
 		}

@@ -1512,11 +1521,13 @@ static int kvaser_usb_open(struct net_device *netdev)
 
 static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv)
 {
-	int i;
+	int i, max_tx_urbs;
+
+	max_tx_urbs = priv->dev->max_tx_urbs;
 
 	priv->active_tx_contexts = 0;
-	for (i = 0; i < MAX_TX_URBS; i++)
-		priv->tx_contexts[i].echo_index = MAX_TX_URBS;
+	for (i = 0; i < max_tx_urbs; i++)
+		priv->tx_contexts[i].echo_index = max_tx_urbs;
 }
 
 /* This method might sleep. Do not call it in the atomic context

@@ -1702,14 +1713,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 		*msg_tx_can_flags |= MSG_FLAG_REMOTE_FRAME;
 
 	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
-	for (i = 0; i < ARRAY_SIZE(priv->tx_contexts); i++) {
-		if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) {
+	for (i = 0; i < dev->max_tx_urbs; i++) {
+		if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) {
 			context = &priv->tx_contexts[i];
 
 			context->echo_index = i;
 			can_put_echo_skb(skb, netdev, context->echo_index);
 			++priv->active_tx_contexts;
-			if (priv->active_tx_contexts >= MAX_TX_URBS)
+			if (priv->active_tx_contexts >= dev->max_tx_urbs)
 				netif_stop_queue(netdev);
 
 			break;

@@ -1743,7 +1754,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 	spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
 	can_free_echo_skb(netdev, context->echo_index);
-	context->echo_index = MAX_TX_URBS;
+	context->echo_index = dev->max_tx_urbs;
 	--priv->active_tx_contexts;
 	netif_wake_queue(netdev);

@@ -1881,7 +1892,9 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
 	if (err)
 		return err;
 
-	netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
+	netdev = alloc_candev(sizeof(*priv) +
+			      dev->max_tx_urbs * sizeof(*priv->tx_contexts),
+			      dev->max_tx_urbs);
 	if (!netdev) {
 		dev_err(&intf->dev, "Cannot alloc candev\n");
 		return -ENOMEM;

@@ -2009,6 +2022,13 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 		return err;
 	}
 
+	dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
+		((dev->fw_version >> 24) & 0xff),
+		((dev->fw_version >> 16) & 0xff),
+		(dev->fw_version & 0xffff));
+
+	dev_dbg(&intf->dev, "Max oustanding tx = %d URBs\n", dev->max_tx_urbs);
+
 	err = kvaser_usb_get_card_info(dev);
 	if (err) {
 		dev_err(&intf->dev,

@@ -2016,11 +2036,6 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 		return err;
 	}
 
-	dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n",
-		((dev->fw_version >> 24) & 0xff),
-		((dev->fw_version >> 16) & 0xff),
-		(dev->fw_version & 0xffff));
-
 	for (i = 0; i < dev->nchannels; i++) {
 		err = kvaser_usb_init_one(intf, id, i);
 		if (err) {
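The kvaser series replaces the compile-time MAX_TX_URBS with the firmware-reported max_tx_urbs, which is why tx_contexts[] becomes a C99 flexible array member and alloc_candev() is passed the extra trailing bytes. The sizing idiom on its own, sketched with generic names (n stands for the device-reported limit, assumed known before allocation):

	struct example_tx_ctx {
		u32 echo_index;
	};

	struct example_priv {
		int active;
		struct example_tx_ctx tx_contexts[];	/* flexible array */
	};

	struct example_priv *p;

	p = kzalloc(sizeof(*p) + n * sizeof(*p->tx_contexts), GFP_KERNEL);
	if (!p)
		return -ENOMEM;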
@@ -26,8 +26,8 @@
 #define PUCAN_CMD_FILTER_STD		0x008
 #define PUCAN_CMD_TX_ABORT		0x009
 #define PUCAN_CMD_WR_ERR_CNT		0x00a
-#define PUCAN_CMD_RX_FRAME_ENABLE	0x00b
-#define PUCAN_CMD_RX_FRAME_DISABLE	0x00c
+#define PUCAN_CMD_SET_EN_OPTION		0x00b
+#define PUCAN_CMD_CLR_DIS_OPTION	0x00c
 #define PUCAN_CMD_END_OF_COLLECTION	0x3ff
 
 /* uCAN received messages list */

@@ -101,14 +101,15 @@ struct __packed pucan_wr_err_cnt {
 	u16	unused;
 };
 
-/* uCAN RX_FRAME_ENABLE command fields */
-#define PUCAN_FLTEXT_ERROR		0x0001
-#define PUCAN_FLTEXT_BUSLOAD		0x0002
+/* uCAN SET_EN/CLR_DIS _OPTION command fields */
+#define PUCAN_OPTION_ERROR		0x0001
+#define PUCAN_OPTION_BUSLOAD		0x0002
+#define PUCAN_OPTION_CANDFDISO		0x0004
 
-struct __packed pucan_filter_ext {
+struct __packed pucan_options {
 	__le16	opcode_channel;
 
-	__le16	ext_mask;
+	__le16	options;
 	u32	unused;
 };
@@ -110,13 +110,13 @@ struct __packed pcan_ufd_led {
 	u8	unused[5];
 };
 
-/* Extended usage of uCAN commands CMD_RX_FRAME_xxxABLE for PCAN-USB Pro FD */
+/* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */
 #define PCAN_UFD_FLTEXT_CALIBRATION	0x8000
 
-struct __packed pcan_ufd_filter_ext {
+struct __packed pcan_ufd_options {
 	__le16	opcode_channel;
 
-	__le16	ext_mask;
-	u16	unused;
+	__le16	ucan_mask;
+	__le16	usb_mask;
 };

@@ -251,6 +251,27 @@ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
 	/* moves the pointer forward */
 	pc += sizeof(struct pucan_wr_err_cnt);
 
+	/* add command to switch from ISO to non-ISO mode, if fw allows it */
+	if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) {
+		struct pucan_options *puo = (struct pucan_options *)pc;
+
+		puo->opcode_channel =
+			(dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ?
+			pucan_cmd_opcode_channel(dev,
+						 PUCAN_CMD_CLR_DIS_OPTION) :
+			pucan_cmd_opcode_channel(dev, PUCAN_CMD_SET_EN_OPTION);
+
+		puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO);
+
+		/* to be sure that no other extended bits will be taken into
+		 * account
+		 */
+		puo->unused = 0;
+
+		/* moves the pointer forward */
+		pc += sizeof(struct pucan_options);
+	}
+
 	/* next, go back to operational mode */
 	cmd = (struct pucan_command *)pc;
 	cmd->opcode_channel = pucan_cmd_opcode_channel(dev,

@@ -321,21 +342,21 @@ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx,
 	return pcan_usb_fd_send_cmd(dev, cmd);
 }
 
-/* set/unset notifications filter:
+/* set/unset options
  *
- *	onoff	sets(1)/unset(0) notifications
- *	mask	each bit defines a kind of notification to set/unset
+ *	onoff	set(1)/unset(0) options
+ *	mask	each bit defines a kind of options to set/unset
 */
-static int pcan_usb_fd_set_filter_ext(struct peak_usb_device *dev,
-				      bool onoff, u16 ext_mask, u16 usb_mask)
+static int pcan_usb_fd_set_options(struct peak_usb_device *dev,
+				   bool onoff, u16 ucan_mask, u16 usb_mask)
 {
-	struct pcan_ufd_filter_ext *cmd = pcan_usb_fd_cmd_buffer(dev);
+	struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev);
 
 	cmd->opcode_channel = pucan_cmd_opcode_channel(dev,
-					(onoff) ? PUCAN_CMD_RX_FRAME_ENABLE :
-						  PUCAN_CMD_RX_FRAME_DISABLE);
+					(onoff) ? PUCAN_CMD_SET_EN_OPTION :
+						  PUCAN_CMD_CLR_DIS_OPTION);
 
-	cmd->ext_mask = cpu_to_le16(ext_mask);
+	cmd->ucan_mask = cpu_to_le16(ucan_mask);
 	cmd->usb_mask = cpu_to_le16(usb_mask);
 
 	/* send the command */

@@ -770,9 +791,9 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
 					 &pcan_usb_pro_fd);
 
 		/* enable USB calibration messages */
-		err = pcan_usb_fd_set_filter_ext(dev, 1,
-						 PUCAN_FLTEXT_ERROR,
-						 PCAN_UFD_FLTEXT_CALIBRATION);
+		err = pcan_usb_fd_set_options(dev, 1,
+					      PUCAN_OPTION_ERROR,
+					      PCAN_UFD_FLTEXT_CALIBRATION);
 	}
 
 	pdev->usb_if->dev_opened_count++;

@@ -806,9 +827,9 @@ static int pcan_usb_fd_stop(struct peak_usb_device *dev)
 
 	/* turn off special msgs for that interface if no other dev opened */
 	if (pdev->usb_if->dev_opened_count == 1)
-		pcan_usb_fd_set_filter_ext(dev, 0,
-					   PUCAN_FLTEXT_ERROR,
-					   PCAN_UFD_FLTEXT_CALIBRATION);
+		pcan_usb_fd_set_options(dev, 0,
+					PUCAN_OPTION_ERROR,
+					PCAN_UFD_FLTEXT_CALIBRATION);
 	pdev->usb_if->dev_opened_count--;
 
 	return 0;

@@ -860,8 +881,14 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
 			 pdev->usb_if->fw_info.fw_version[2],
 			 dev->adapter->ctrl_count);
 
-	/* the currently supported hw is non-ISO */
-	dev->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+	/* check for ability to switch between ISO/non-ISO modes */
+	if (pdev->usb_if->fw_info.fw_version[0] >= 2) {
+		/* firmware >= 2.x supports ISO/non-ISO switching */
+		dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
+	} else {
+		/* firmware < 2.x only supports fixed(!) non-ISO */
+		dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO;
+	}
 
 	/* tell the hardware the can driver is running */
 	err = pcan_usb_fd_drv_loaded(dev, 1);

@@ -937,9 +964,9 @@ static void pcan_usb_fd_exit(struct peak_usb_device *dev)
 	if (dev->ctrl_idx == 0) {
 		/* turn off calibration message if any device were opened */
 		if (pdev->usb_if->dev_opened_count > 0)
-			pcan_usb_fd_set_filter_ext(dev, 0,
-						   PUCAN_FLTEXT_ERROR,
-						   PCAN_UFD_FLTEXT_CALIBRATION);
+			pcan_usb_fd_set_options(dev, 0,
+						PUCAN_OPTION_ERROR,
+						PCAN_UFD_FLTEXT_CALIBRATION);
 
 		/* tell USB adapter that the driver is being unloaded */
 		pcan_usb_fd_drv_loaded(dev, 0);
@@ -1811,7 +1811,7 @@ struct bnx2x {
 	int			stats_state;
 
 	/* used for synchronization of concurrent threads statistics handling */
-	spinlock_t		stats_lock;
+	struct mutex		stats_lock;
 
 	/* used by dmae command loader */
 	struct dmae_command	stats_dmae;

@@ -1935,8 +1935,6 @@ struct bnx2x {
 
 	int fp_array_size;
 	u32 dump_preset_idx;
-	bool					stats_started;
-	struct semaphore			stats_sema;
 
 	u8					phys_port_id[ETH_ALEN];
@@ -129,8 +129,8 @@ struct bnx2x_mac_vals {
 	u32 xmac_val;
 	u32 emac_addr;
 	u32 emac_val;
-	u32 umac_addr;
-	u32 umac_val;
+	u32 umac_addr[2];
+	u32 umac_val[2];
 	u32 bmac_addr;
 	u32 bmac_val[2];
 };

@@ -7866,6 +7866,20 @@ int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
 	return 0;
 }
 
+/* previous driver DMAE transaction may have occurred when pre-boot stage ended
+ * and boot began, or when kdump kernel was loaded. Either case would invalidate
+ * the addresses of the transaction, resulting in was-error bit set in the pci
+ * causing all hw-to-host pcie transactions to timeout. If this happened we want
+ * to clear the interrupt which detected this from the pglueb and the was done
+ * bit
+ */
+static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
+{
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+		       1 << BP_ABS_FUNC(bp));
+}
+
 static int bnx2x_init_hw_func(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);

@@ -7958,8 +7972,7 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
 
 	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
 
-	if (!CHIP_IS_E1x(bp))
-		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+	bnx2x_clean_pglue_errors(bp);
 
 	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
 	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);

@@ -10141,6 +10154,25 @@ static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
 	return base + (BP_ABS_FUNC(bp)) * stride;
 }
 
+static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
+					 u8 port, u32 reset_reg,
+					 struct bnx2x_mac_vals *vals)
+{
+	u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
+	u32 base_addr;
+
+	if (!(mask & reset_reg))
+		return false;
+
+	BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
+	base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+	vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
+	vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
+	REG_WR(bp, vals->umac_addr[port], 0);
+
+	return true;
+}
+
 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 					struct bnx2x_mac_vals *vals)
 {

@@ -10149,10 +10181,7 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 	u8 port = BP_PORT(bp);
 
 	/* reset addresses as they also mark which values were changed */
-	vals->bmac_addr = 0;
-	vals->umac_addr = 0;
-	vals->xmac_addr = 0;
-	vals->emac_addr = 0;
+	memset(vals, 0, sizeof(*vals));
 
 	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);

@@ -10201,15 +10230,11 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 			REG_WR(bp, vals->xmac_addr, 0);
 			mac_stopped = true;
 		}
-		mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
-		if (mask & reset_reg) {
-			BNX2X_DEV_INFO("Disable umac Rx\n");
-			base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
-			vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
-			vals->umac_val = REG_RD(bp, vals->umac_addr);
-			REG_WR(bp, vals->umac_addr, 0);
-			mac_stopped = true;
-		}
+
+		mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
+							    reset_reg, vals);
+		mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
+							    reset_reg, vals);
 	}
 
 	if (mac_stopped)

@@ -10505,8 +10530,11 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 		/* Close the MAC Rx to prevent BRB from filling up */
 		bnx2x_prev_unload_close_mac(bp, &mac_vals);
 
-		/* close LLH filters towards the BRB */
+		/* close LLH filters for both ports towards the BRB */
 		bnx2x_set_rx_filter(&bp->link_params, 0);
+		bp->link_params.port ^= 1;
+		bnx2x_set_rx_filter(&bp->link_params, 0);
+		bp->link_params.port ^= 1;
 
 		/* Check if the UNDI driver was previously loaded */
 		if (bnx2x_prev_is_after_undi(bp)) {

@@ -10553,8 +10581,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 
 	if (mac_vals.xmac_addr)
 		REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
-	if (mac_vals.umac_addr)
-		REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
+	if (mac_vals.umac_addr[0])
+		REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
+	if (mac_vals.umac_addr[1])
+		REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
 	if (mac_vals.emac_addr)
 		REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
 	if (mac_vals.bmac_addr) {

@@ -10571,26 +10601,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 	return bnx2x_prev_mcp_done(bp);
 }
 
-/* previous driver DMAE transaction may have occurred when pre-boot stage ended
- * and boot began, or when kdump kernel was loaded. Either case would invalidate
- * the addresses of the transaction, resulting in was-error bit set in the pci
- * causing all hw-to-host pcie transactions to timeout. If this happened we want
- * to clear the interrupt which detected this from the pglueb and the was done
- * bit
- */
-static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
-{
-	if (!CHIP_IS_E1x(bp)) {
-		u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
-		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
-			DP(BNX2X_MSG_SP,
-			   "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
-			REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
-			       1 << BP_FUNC(bp));
-		}
-	}
-}
-
 static int bnx2x_prev_unload(struct bnx2x *bp)
 {
 	int time_counter = 10;

@@ -10600,7 +10610,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
 	/* clear hw from errors which may have resulted from an interrupted
 	 * dmae transaction.
 	 */
-	bnx2x_prev_interrupted_dmae(bp);
+	bnx2x_clean_pglue_errors(bp);
 
 	/* Release previously held locks */
 	hw_lock_reg = (BP_FUNC(bp) <= 5) ?

@@ -12044,9 +12054,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
 	mutex_init(&bp->port.phy_mutex);
 	mutex_init(&bp->fw_mb_mutex);
 	mutex_init(&bp->drv_info_mutex);
+	mutex_init(&bp->stats_lock);
 	bp->drv_info_mng_owner = false;
-	spin_lock_init(&bp->stats_lock);
-	sema_init(&bp->stats_sema, 1);
 
 	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
 	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);

@@ -13673,9 +13682,9 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
 	cancel_delayed_work_sync(&bp->sp_task);
 	cancel_delayed_work_sync(&bp->period_task);
 
-	spin_lock_bh(&bp->stats_lock);
+	mutex_lock(&bp->stats_lock);
 	bp->stats_state = STATS_STATE_DISABLED;
-	spin_unlock_bh(&bp->stats_lock);
+	mutex_unlock(&bp->stats_lock);
 
 	bnx2x_save_statistics(bp);
@@ -2238,7 +2238,9 @@ int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 
 		cookie.vf = vf;
 		cookie.state = VF_ACQUIRED;
-		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+		rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+		if (rc)
+			goto op_err;
 	}
 
 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
|
|||
*/
|
||||
static void bnx2x_storm_stats_post(struct bnx2x *bp)
|
||||
{
|
||||
if (!bp->stats_pending) {
|
||||
int rc;
|
||||
int rc;
|
||||
|
||||
spin_lock_bh(&bp->stats_lock);
|
||||
if (bp->stats_pending)
|
||||
return;
|
||||
|
||||
if (bp->stats_pending) {
|
||||
spin_unlock_bh(&bp->stats_lock);
|
||||
return;
|
||||
}
|
||||
bp->fw_stats_req->hdr.drv_stats_counter =
|
||||
cpu_to_le16(bp->stats_counter++);
|
||||
|
||||
bp->fw_stats_req->hdr.drv_stats_counter =
|
||||
cpu_to_le16(bp->stats_counter++);
|
||||
DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
|
||||
le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
|
||||
|
||||
DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
|
||||
le16_to_cpu(bp->fw_stats_req->hdr.drv_stats_counter));
|
||||
/* adjust the ramrod to include VF queues statistics */
|
||||
bnx2x_iov_adjust_stats_req(bp);
|
||||
bnx2x_dp_stats(bp);
|
||||
|
||||
/* adjust the ramrod to include VF queues statistics */
|
||||
bnx2x_iov_adjust_stats_req(bp);
|
||||
bnx2x_dp_stats(bp);
|
||||
|
||||
/* send FW stats ramrod */
|
||||
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
|
||||
U64_HI(bp->fw_stats_req_mapping),
|
||||
U64_LO(bp->fw_stats_req_mapping),
|
||||
NONE_CONNECTION_TYPE);
|
||||
if (rc == 0)
|
||||
bp->stats_pending = 1;
|
||||
|
||||
spin_unlock_bh(&bp->stats_lock);
|
||||
}
|
||||
/* send FW stats ramrod */
|
||||
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
|
||||
U64_HI(bp->fw_stats_req_mapping),
|
||||
U64_LO(bp->fw_stats_req_mapping),
|
||||
NONE_CONNECTION_TYPE);
|
||||
if (rc == 0)
|
||||
bp->stats_pending = 1;
|
||||
}
|
||||
|
||||
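The rework above moves the test of stats_pending under stats_lock and sets the flag under the same lock, so two contexts can no longer both observe the flag clear and post the ramrod twice. A minimal, compilable userspace sketch of that test-and-set-under-a-lock pattern (all names here are illustrative, not driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct stats {
    pthread_mutex_t lock;
    bool pending;
};

/* stand-in for posting the FW ramrod; assume it may fail */
static int hw_post(struct stats *s) { (void)s; return 0; }

static int post_once(struct stats *s)
{
    int rc = 0;

    pthread_mutex_lock(&s->lock);
    if (!s->pending) {
        rc = hw_post(s);
        if (rc == 0)
            s->pending = true; /* set under the same lock as the test */
    }
    pthread_mutex_unlock(&s->lock);
    return rc;
}

int main(void)
{
    struct stats s = { PTHREAD_MUTEX_INITIALIZER, false };

    printf("first post: %d; a second post is a no-op\n", post_once(&s));
    post_once(&s);
    return 0;
}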
static void bnx2x_hw_stats_post(struct bnx2x *bp)

@@ -221,7 +213,7 @@ static void bnx2x_stats_comp(struct bnx2x *bp)
 */

/* should be called under stats_sema */
static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
    struct dmae_command *dmae;
    u32 opcode;

@@ -519,7 +511,7 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
}

/* should be called under stats_sema */
static void __bnx2x_stats_start(struct bnx2x *bp)
static void bnx2x_stats_start(struct bnx2x *bp)
{
    if (IS_PF(bp)) {
        if (bp->port.pmf)

@@ -531,34 +523,13 @@ static void __bnx2x_stats_start(struct bnx2x *bp)
        bnx2x_hw_stats_post(bp);
        bnx2x_storm_stats_post(bp);
    }

    bp->stats_started = true;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
    if (down_timeout(&bp->stats_sema, HZ/10))
        BNX2X_ERR("Unable to acquire stats lock\n");
    __bnx2x_stats_start(bp);
    up(&bp->stats_sema);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
    if (down_timeout(&bp->stats_sema, HZ/10))
        BNX2X_ERR("Unable to acquire stats lock\n");
    bnx2x_stats_comp(bp);
    __bnx2x_stats_pmf_update(bp);
    __bnx2x_stats_start(bp);
    up(&bp->stats_sema);
}

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
    if (down_timeout(&bp->stats_sema, HZ/10))
        BNX2X_ERR("Unable to acquire stats lock\n");
    __bnx2x_stats_pmf_update(bp);
    up(&bp->stats_sema);
    bnx2x_stats_pmf_update(bp);
    bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)

@@ -568,11 +539,9 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
 */
    if (IS_VF(bp))
        return;
    if (down_timeout(&bp->stats_sema, HZ/10))
        BNX2X_ERR("Unable to acquire stats lock\n");

    bnx2x_stats_comp(bp);
    __bnx2x_stats_start(bp);
    up(&bp->stats_sema);
    bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)

@@ -1246,18 +1215,12 @@ static void bnx2x_stats_update(struct bnx2x *bp)
{
    u32 *stats_comp = bnx2x_sp(bp, stats_comp);

    /* we run update from timer context, so give up
     * if somebody is in the middle of transition
     */
    if (down_trylock(&bp->stats_sema))
    if (bnx2x_edebug_stats_stopped(bp))
        return;

    if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
        goto out;

    if (IS_PF(bp)) {
        if (*stats_comp != DMAE_COMP_VAL)
            goto out;
            return;

        if (bp->port.pmf)
            bnx2x_hw_stats_update(bp);

@@ -1267,7 +1230,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
            BNX2X_ERR("storm stats were not updated for 3 times\n");
            bnx2x_panic();
        }
        goto out;
        return;
    }
    } else {
        /* vf doesn't collect HW statistics, and doesn't get completions

@@ -1281,7 +1244,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)

    /* vf is done */
    if (IS_VF(bp))
        goto out;
        return;

    if (netif_msg_timer(bp)) {
        struct bnx2x_eth_stats *estats = &bp->eth_stats;

@@ -1292,9 +1255,6 @@ static void bnx2x_stats_update(struct bnx2x *bp)

    bnx2x_hw_stats_post(bp);
    bnx2x_storm_stats_post(bp);

out:
    up(&bp->stats_sema);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)

@@ -1358,12 +1318,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)

static void bnx2x_stats_stop(struct bnx2x *bp)
{
    int update = 0;

    if (down_timeout(&bp->stats_sema, HZ/10))
        BNX2X_ERR("Unable to acquire stats lock\n");

    bp->stats_started = false;
    bool update = false;

    bnx2x_stats_comp(bp);

@@ -1381,8 +1336,6 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
        bnx2x_hw_stats_post(bp);
        bnx2x_stats_comp(bp);
    }

    up(&bp->stats_sema);
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)

@@ -1410,18 +1363,28 @@ static const struct {

void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
    enum bnx2x_stats_state state;
    void (*action)(struct bnx2x *bp);
    enum bnx2x_stats_state state = bp->stats_state;

    if (unlikely(bp->panic))
        return;

    spin_lock_bh(&bp->stats_lock);
    state = bp->stats_state;
    bp->stats_state = bnx2x_stats_stm[state][event].next_state;
    action = bnx2x_stats_stm[state][event].action;
    spin_unlock_bh(&bp->stats_lock);
    /* Statistics updates run from timer context, and we don't want to stop
     * that context in case someone is in the middle of a transition.
     * For other events, wait a bit until the lock is taken.
     */
    if (!mutex_trylock(&bp->stats_lock)) {
        if (event == STATS_EVENT_UPDATE)
            return;

    action(bp);
        DP(BNX2X_MSG_STATS,
           "Unlikely stats' lock contention [event %d]\n", event);
        mutex_lock(&bp->stats_lock);
    }

    bnx2x_stats_stm[state][event].action(bp);
    bp->stats_state = bnx2x_stats_stm[state][event].next_state;

    mutex_unlock(&bp->stats_lock);

    if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
        DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",

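The trylock above keeps the timer path non-blocking: a periodic STATS_EVENT_UPDATE is simply dropped when the lock is contended, while every other event falls back to mutex_lock() and waits. A compilable userspace sketch of that "drop if busy, but only for low-priority callers" pattern (names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

/* timer_context models STATS_EVENT_UPDATE: it may be skipped, never blocked */
static bool handle_event(bool timer_context)
{
    if (pthread_mutex_trylock(&stats_lock) != 0) {
        if (timer_context)
            return false;              /* drop this periodic update */
        pthread_mutex_lock(&stats_lock); /* other events wait their turn */
    }

    /* ... run the state-machine action while holding the lock ... */

    pthread_mutex_unlock(&stats_lock);
    return true;
}

int main(void)
{
    printf("timer event handled: %d\n", handle_event(true));
    return 0;
}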
@@ -1998,13 +1961,34 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
    }
}

void bnx2x_stats_safe_exec(struct bnx2x *bp,
                           void (func_to_exec)(void *cookie),
                           void *cookie)
{
    if (down_timeout(&bp->stats_sema, HZ/10))
        BNX2X_ERR("Unable to acquire stats lock\n");
int bnx2x_stats_safe_exec(struct bnx2x *bp,
                          void (func_to_exec)(void *cookie),
                          void *cookie)
{
    int cnt = 10, rc = 0;

    /* Wait for statistics to end [while blocking further requests],
     * then run supplied function 'safely'.
     */
    mutex_lock(&bp->stats_lock);

    bnx2x_stats_comp(bp);
    while (bp->stats_pending && cnt--)
        if (bnx2x_storm_stats_update(bp))
            usleep_range(1000, 2000);
    if (bp->stats_pending) {
        BNX2X_ERR("Failed to wait for stats pending to clear [possibly FW is stuck]\n");
        rc = -EBUSY;
        goto out;
    }

    func_to_exec(cookie);
    __bnx2x_stats_start(bp);
    up(&bp->stats_sema);

out:
    /* No need to restart statistics - if they're enabled, the timer
     * will restart the statistics.
     */
    mutex_unlock(&bp->stats_lock);

    return rc;
}

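bnx2x_stats_safe_exec() now reports failure instead of silently running the callback, which is why bnx2x_vf_close() earlier in this diff started checking its return value. The callback-plus-cookie shape is easy to reuse; a small compilable sketch under assumed names (safe_exec, vf_cookie, and set_vf_state are all hypothetical):

#include <stdio.h>

/* run fn(cookie) only if the subsystem is quiescent; -1 models -EBUSY */
static int safe_exec(int busy, void (*fn)(void *cookie), void *cookie)
{
    if (busy)
        return -1;
    fn(cookie);
    return 0;
}

struct vf_cookie {
    int vf_id;
    int new_state;
};

static void set_vf_state(void *cookie)
{
    struct vf_cookie *c = cookie;

    printf("vf %d -> state %d\n", c->vf_id, c->new_state);
}

int main(void)
{
    struct vf_cookie c = { .vf_id = 3, .new_state = 1 };

    if (safe_exec(0, set_vf_state, &c))
        fprintf(stderr, "could not run callback\n");
    return 0;
}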
@@ -539,9 +539,9 @@ struct bnx2x;
void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp);
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
void bnx2x_stats_safe_exec(struct bnx2x *bp,
                           void (func_to_exec)(void *cookie),
                           void *cookie);
int bnx2x_stats_safe_exec(struct bnx2x *bp,
                          void (func_to_exec)(void *cookie),
                          void *cookie);

/**
 * bnx2x_save_statistics - save statistics when unloading.

@@ -376,8 +376,6 @@ enum {
enum {
    INGQ_EXTRAS = 2,        /* firmware event queue and */
                            /* forwarded interrupts */
    MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
               + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
    MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
               + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};

@@ -623,11 +621,13 @@ struct sge {
    unsigned int idma_qid[2];   /* SGE IDMA Hung Ingress Queue ID */

    unsigned int egr_start;
    unsigned int egr_sz;
    unsigned int ingr_start;
    void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
    struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
    DECLARE_BITMAP(starving_fl, MAX_EGRQ);
    DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
    unsigned int ingr_sz;
    void **egr_map;             /* qid->queue egress queue map */
    struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
    unsigned long *starving_fl;
    unsigned long *txq_maperr;
    struct timer_list rx_timer; /* refills starving FLs */
    struct timer_list tx_timer; /* checks Tx queues */
};

@@ -1143,6 +1143,8 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,

unsigned int qtimer_val(const struct adapter *adap,
                        const struct sge_rspq *q);

int t4_init_devlog_params(struct adapter *adapter);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);

@@ -670,9 +670,13 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
        "0.9375" };

    int i;
    u16 incr[NMTUS][NCCTRL_WIN];
    u16 (*incr)[NCCTRL_WIN];
    struct adapter *adap = seq->private;

    incr = kmalloc(sizeof(*incr) * NMTUS, GFP_KERNEL);
    if (!incr)
        return -ENOMEM;

    t4_read_cong_tbl(adap, incr);

    for (i = 0; i < NCCTRL_WIN; ++i) {

@@ -685,6 +689,8 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v)
                   adap->params.a_wnd[i],
                   dec_fac[adap->params.b_wnd[i]]);
    }

    kfree(incr);
    return 0;
}

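Moving the NMTUS x NCCTRL_WIN table off the stack relies on a pointer-to-array: one allocation of sizeof(*incr) * NMTUS bytes keeps genuine two-dimensional indexing, so incr[i][j] works exactly as it did for the automatic array. A userspace sketch of the same idiom (the sizes here are stand-ins for NMTUS and NCCTRL_WIN):

#include <stdio.h>
#include <stdlib.h>

#define NROWS 4 /* stands in for NMTUS */
#define NCOLS 3 /* stands in for NCCTRL_WIN */

int main(void)
{
    /* pointer to an array of NCOLS elements: sizeof(*incr) is one whole
     * row, so a single allocation gives a real 2-D layout */
    unsigned short (*incr)[NCOLS] = malloc(sizeof(*incr) * NROWS);
    if (!incr)
        return 1;

    for (int i = 0; i < NROWS; i++)
        for (int j = 0; j < NCOLS; j++)
            incr[i][j] = (unsigned short)(i * NCOLS + j);

    printf("incr[2][1] = %u\n", incr[2][1]);
    free(incr);
    return 0;
}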
@@ -920,7 +920,7 @@ static void quiesce_rx(struct adapter *adap)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
    for (i = 0; i < adap->sge.ingr_sz; i++) {
        struct sge_rspq *q = adap->sge.ingr_map[i];

        if (q && q->handler) {

@@ -934,6 +934,21 @@ static void quiesce_rx(struct adapter *adap)
    }
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
    if (adap->flags & FULL_INIT_DONE) {
        t4_intr_disable(adap);
        if (adap->flags & USING_MSIX) {
            free_msix_queue_irqs(adap);
            free_irq(adap->msix_info[0].vec, adap);
        } else {
            free_irq(adap->pdev->irq, adap);
        }
        quiesce_rx(adap);
    }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */

@@ -941,7 +956,7 @@ static void enable_rx(struct adapter *adap)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
    for (i = 0; i < adap->sge.ingr_sz; i++) {
        struct sge_rspq *q = adap->sge.ingr_map[i];

        if (!q)

@@ -992,8 +1007,8 @@ static int setup_sge_queues(struct adapter *adap)
    int err, msi_idx, i, j;
    struct sge *s = &adap->sge;

    bitmap_zero(s->starving_fl, MAX_EGRQ);
    bitmap_zero(s->txq_maperr, MAX_EGRQ);
    bitmap_zero(s->starving_fl, s->egr_sz);
    bitmap_zero(s->txq_maperr, s->egr_sz);

    if (adap->flags & USING_MSIX)
        msi_idx = 1;         /* vector 0 is for non-queue interrupts */

@@ -1005,6 +1020,19 @@ static int setup_sge_queues(struct adapter *adap)
        msi_idx = -((int)s->intrq.abs_id + 1);
    }

    /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
     * don't forget to update the following which need to be
     * synchronized to any changes here.
     *
     * 1. The calculations of MAX_INGQ in cxgb4.h.
     *
     * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
     *    to accommodate any new/deleted Ingress Queues
     *    which need MSI-X Vectors.
     *
     * 3. Update sge_qinfo_show() to include information on the
     *    new/deleted queues.
     */
    err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                           msi_idx, NULL, fwevtq_handler);
    if (err) {

@@ -4246,19 +4274,12 @@ static int cxgb_up(struct adapter *adap)

static void cxgb_down(struct adapter *adapter)
{
    t4_intr_disable(adapter);
    cancel_work_sync(&adapter->tid_release_task);
    cancel_work_sync(&adapter->db_full_task);
    cancel_work_sync(&adapter->db_drop_task);
    adapter->tid_release_task_busy = false;
    adapter->tid_release_head = NULL;

    if (adapter->flags & USING_MSIX) {
        free_msix_queue_irqs(adapter);
        free_irq(adapter->msix_info[0].vec, adapter);
    } else
        free_irq(adapter->pdev->irq, adapter);
    quiesce_rx(adapter);
    t4_sge_stop(adapter);
    t4_free_sge_resources(adapter);
    adapter->flags &= ~FULL_INIT_DONE;

@@ -4739,8 +4760,9 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
    if (ret < 0)
        return ret;

    ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
                      0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
    ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
                      MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
                      FW_CMD_CAP_PF);
    if (ret < 0)
        return ret;

@@ -5094,10 +5116,15 @@ static int adap_init0(struct adapter *adap)
    enum dev_state state;
    u32 params[7], val[7];
    struct fw_caps_config_cmd caps_cmd;
    struct fw_devlog_cmd devlog_cmd;
    u32 devlog_meminfo;
    int reset = 1;

    /* Grab Firmware Device Log parameters as early as possible so we have
     * access to it for debugging, etc.
     */
    ret = t4_init_devlog_params(adap);
    if (ret < 0)
        return ret;

    /* Contact FW, advertising Master capability */
    ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
    if (ret < 0) {

@@ -5175,30 +5202,6 @@ static int adap_init0(struct adapter *adap)
    if (ret < 0)
        goto bye;

    /* Read firmware device log parameters. We really need to find a way
     * to get these parameters initialized with some default values (which
     * are likely to be correct) for the case where we either don't
     * attach to the firmware or it has crashed when we probe the adapter.
     * That way we'll still be able to perform early firmware startup
     * debugging ... If the request to get the Firmware's Device Log
     * parameters fails, we'll live so we don't make that a fatal error.
     */
    memset(&devlog_cmd, 0, sizeof(devlog_cmd));
    devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
                                   FW_CMD_REQUEST_F | FW_CMD_READ_F);
    devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
    ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
                     &devlog_cmd);
    if (ret == 0) {
        devlog_meminfo =
            ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
        adap->params.devlog.memtype =
            FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
        adap->params.devlog.start =
            FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
        adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
    }

    /*
     * Find out what ports are available to us. Note that we need to do
     * this before calling adap_init0_no_config() since it needs nports

@@ -5299,6 +5302,51 @@ static int adap_init0(struct adapter *adap)
    adap->tids.nftids = val[4] - val[3] + 1;
    adap->sge.ingr_start = val[5];

    /* qids (ingress/egress) returned from firmware can be anywhere
     * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
     * Hence the driver needs to allocate memory for this range to
     * store the queue info. Get the highest IQFLINT/EQ index returned
     * in FW_EQ_*_CMD.alloc command.
     */
    params[0] = FW_PARAM_PFVF(EQ_END);
    params[1] = FW_PARAM_PFVF(IQFLINT_END);
    ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
    if (ret < 0)
        goto bye;
    adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
    adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;

    adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
                                sizeof(*adap->sge.egr_map), GFP_KERNEL);
    if (!adap->sge.egr_map) {
        ret = -ENOMEM;
        goto bye;
    }

    adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
                                 sizeof(*adap->sge.ingr_map), GFP_KERNEL);
    if (!adap->sge.ingr_map) {
        ret = -ENOMEM;
        goto bye;
    }

    /* Allocate the memory for the various egress queue bitmaps,
     * i.e. starving_fl and txq_maperr.
     */
    adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
                                    sizeof(long), GFP_KERNEL);
    if (!adap->sge.starving_fl) {
        ret = -ENOMEM;
        goto bye;
    }

    adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
                                   sizeof(long), GFP_KERNEL);
    if (!adap->sge.txq_maperr) {
        ret = -ENOMEM;
        goto bye;
    }

    params[0] = FW_PARAM_PFVF(CLIP_START);
    params[1] = FW_PARAM_PFVF(CLIP_END);
    ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);

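The bitmap allocations above size the backing store as BITS_TO_LONGS(egr_sz) longs, i.e. one long word per BITS_PER_LONG queue IDs, rounded up. A compilable userspace sketch of the same sizing and bit addressing (egr_sz and the bit index are arbitrary example values):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
    unsigned int egr_sz = 200; /* hypothetical: highest qid - egr_start + 1 */

    /* zero-initialised like kcalloc */
    unsigned long *starving = calloc(BITS_TO_LONGS(egr_sz), sizeof(long));
    if (!starving)
        return 1;

    /* set and test bit 130: word index 130 / BITS_PER_LONG,
     * bit offset 130 % BITS_PER_LONG */
    starving[130 / BITS_PER_LONG] |= 1UL << (130 % BITS_PER_LONG);
    printf("bit 130 set: %d\n",
           !!(starving[130 / BITS_PER_LONG] & (1UL << (130 % BITS_PER_LONG))));
    free(starving);
    return 0;
}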
@@ -5507,6 +5555,10 @@ static int adap_init0(struct adapter *adap)
     * happened to HW/FW, stop issuing commands.
     */
bye:
    kfree(adap->sge.egr_map);
    kfree(adap->sge.ingr_map);
    kfree(adap->sge.starving_fl);
    kfree(adap->sge.txq_maperr);
    if (ret != -ETIMEDOUT && ret != -EIO)
        t4_fw_bye(adap, adap->mbox);
    return ret;

@@ -5534,6 +5586,7 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
        netif_carrier_off(dev);
    }
    spin_unlock(&adap->stats_lock);
    disable_interrupts(adap);
    if (adap->flags & FULL_INIT_DONE)
        cxgb_down(adap);
    rtnl_unlock();

@@ -5942,6 +5995,10 @@ static void free_some_resources(struct adapter *adapter)

    t4_free_mem(adapter->l2t);
    t4_free_mem(adapter->tids.tid_tab);
    kfree(adapter->sge.egr_map);
    kfree(adapter->sge.ingr_map);
    kfree(adapter->sge.starving_fl);
    kfree(adapter->sge.txq_maperr);
    disable_msi(adapter);

    for_each_port(adapter, i)

@@ -6267,6 +6324,8 @@ static void remove_one(struct pci_dev *pdev)
    if (is_offload(adapter))
        detach_ulds(adapter);

    disable_interrupts(adapter);

    for_each_port(adapter, i)
        if (adapter->port[i]->reg_state == NETREG_REGISTERED)
            unregister_netdev(adapter->port[i]);

@@ -2239,7 +2239,7 @@ static void sge_rx_timer_cb(unsigned long data)
    struct adapter *adap = (struct adapter *)data;
    struct sge *s = &adap->sge;

    for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
    for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
        for (m = s->starving_fl[i]; m; m &= m - 1) {
            struct sge_eth_rxq *rxq;
            unsigned int id = __ffs(m) + i * BITS_PER_LONG;

@@ -2327,7 +2327,7 @@ static void sge_tx_timer_cb(unsigned long data)
    struct adapter *adap = (struct adapter *)data;
    struct sge *s = &adap->sge;

    for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
    for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
        for (m = s->txq_maperr[i]; m; m &= m - 1) {
            unsigned long id = __ffs(m) + i * BITS_PER_LONG;
            struct sge_ofld_txq *txq = s->egr_map[id];

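Both timer callbacks walk the bitmap one word at a time: m &= m - 1 clears the lowest set bit after it is handled, and __ffs(m) plus the word offset recovers the queue ID. A compilable userspace sketch of that walk (GCC/Clang's __builtin_ctzl plays the role of the kernel's __ffs; the bitmap contents are arbitrary):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

int main(void)
{
    unsigned long words[2] = { 0x12UL, 0x1UL }; /* bits 1, 4 and one in word 1 */

    for (unsigned int i = 0; i < 2; i++)
        for (unsigned long m = words[i]; m; m &= m - 1) {
            /* index of the lowest set bit, plus the word's base offset */
            unsigned long id = __builtin_ctzl(m) + i * BITS_PER_LONG;
            printf("set bit: %lu\n", id);
        }
    return 0;
}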
@@ -2809,7 +2809,8 @@ void t4_free_sge_resources(struct adapter *adap)
    free_rspq_fl(adap, &adap->sge.intrq, NULL);

    /* clear the reverse egress queue map */
    memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
    memset(adap->sge.egr_map, 0,
           adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
}

void t4_sge_start(struct adapter *adap)

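The memset had to change because egr_map is now a pointer rather than an array: sizeof on a pointer yields the pointer's own size, not the buffer's, so the size must be computed explicitly. A small sketch of the pitfall (sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    void *fixed[128];
    void **dynamic = calloc(128, sizeof(*dynamic));

    if (!dynamic)
        return 1;

    /* For a real array, sizeof covers the whole thing... */
    printf("sizeof(fixed)   = %zu\n", sizeof(fixed));   /* 128 * sizeof(void *) */
    /* ...but for a pointer it is just the pointer itself. */
    printf("sizeof(dynamic) = %zu\n", sizeof(dynamic)); /* e.g. 8 on LP64 */

    /* so a heap-backed map must be cleared with an explicit size */
    memset(dynamic, 0, 128 * sizeof(*dynamic));
    free(dynamic);
    return 0;
}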
@@ -4458,6 +4458,59 @@ int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
    return 0;
}

/**
 *  t4_init_devlog_params - initialize adapter->params.devlog
 *  @adap: the adapter
 *
 *  Initialize various fields of the adapter's Firmware Device Log
 *  Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap)
{
    struct devlog_params *dparams = &adap->params.devlog;
    u32 pf_dparams;
    unsigned int devlog_meminfo;
    struct fw_devlog_cmd devlog_cmd;
    int ret;

    /* If we're dealing with newer firmware, the Device Log Parameters
     * are stored in a designated register which allows us to access the
     * Device Log even if we can't talk to the firmware.
     */
    pf_dparams =
        t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
    if (pf_dparams) {
        unsigned int nentries, nentries128;

        dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
        dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;

        nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
        nentries = (nentries128 + 1) * 128;
        dparams->size = nentries * sizeof(struct fw_devlog_e);

        return 0;
    }

    /* Otherwise, ask the firmware for its Device Log Parameters.
     */
    memset(&devlog_cmd, 0, sizeof(devlog_cmd));
    devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
                                   FW_CMD_REQUEST_F | FW_CMD_READ_F);
    devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
    ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
                     &devlog_cmd);
    if (ret)
        return ret;

    devlog_meminfo = ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
    dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
    dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
    dparams->size = ntohl(devlog_cmd.memsize_devlog);

    return 0;
}

/**
 *  t4_init_sge_params - initialize adap->params.sge
 *  @adapter: the adapter

@@ -63,6 +63,8 @@
#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)

#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)

#define SGE_PF_KDOORBELL_A 0x0

#define QID_S    15

@@ -707,6 +709,7 @@
#define PFNUM_V(x) ((x) << PFNUM_S)

#define PCIE_FW_A 0x30b8
#define PCIE_FW_PF_A 0x30bc

#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A 0x5908

@@ -101,7 +101,7 @@ enum fw_wr_opcodes {
    FW_RI_BIND_MW_WR = 0x18,
    FW_RI_FR_NSMR_WR = 0x19,
    FW_RI_INV_LSTAG_WR = 0x1a,
    FW_LASTC2E_WR = 0x40
    FW_LASTC2E_WR = 0x70
};

struct fw_wr_hdr {

@@ -993,6 +993,7 @@ enum fw_memtype_cf {
    FW_MEMTYPE_CF_EXTMEM = 0x2,
    FW_MEMTYPE_CF_FLASH = 0x4,
    FW_MEMTYPE_CF_INTERNAL = 0x5,
    FW_MEMTYPE_CF_EXTMEM1 = 0x6,
};

struct fw_caps_config_cmd {

@@ -1035,6 +1036,7 @@ enum fw_params_mnem {
    FW_PARAMS_MNEM_PFVF = 2,    /* function params */
    FW_PARAMS_MNEM_REG = 3,     /* limited register access */
    FW_PARAMS_MNEM_DMAQ = 4,    /* dma queue params */
    FW_PARAMS_MNEM_CHNET = 5,   /* chnet params */
    FW_PARAMS_MNEM_LAST
};

@@ -3102,7 +3104,8 @@ enum fw_devlog_facility {
    FW_DEVLOG_FACILITY_FCOE = 0x2E,
    FW_DEVLOG_FACILITY_FOISCSI = 0x30,
    FW_DEVLOG_FACILITY_FOFCOE = 0x32,
    FW_DEVLOG_FACILITY_MAX = 0x32,
    FW_DEVLOG_FACILITY_CHNET = 0x34,
    FW_DEVLOG_FACILITY_MAX = 0x34,
};

/* log message format */

@@ -3139,4 +3142,36 @@ struct fw_devlog_cmd {
    (((x) >> FW_DEVLOG_CMD_MEMADDR16_DEVLOG_S) & \
     FW_DEVLOG_CMD_MEMADDR16_DEVLOG_M)

/* P C I E   F W   P F 7   R E G I S T E R */

/* PF7 stores the Firmware Device Log parameters which allow Host Drivers to
 * access the "devlog" without needing to contact firmware. The encoding is
 * mostly the same as that returned by the DEVLOG command except for the size
 * which is encoded as the number of entries in multiples-1 of 128 here rather
 * than the memory size as is done in the DEVLOG command. Thus, 0 means 128
 * and 15 means 2048. This of course in turn constrains the allowed values
 * for the devlog size ...
 */
#define PCIE_FW_PF_DEVLOG 7

#define PCIE_FW_PF_DEVLOG_NENTRIES128_S 28
#define PCIE_FW_PF_DEVLOG_NENTRIES128_M 0xf
#define PCIE_FW_PF_DEVLOG_NENTRIES128_V(x) \
    ((x) << PCIE_FW_PF_DEVLOG_NENTRIES128_S)
#define PCIE_FW_PF_DEVLOG_NENTRIES128_G(x) \
    (((x) >> PCIE_FW_PF_DEVLOG_NENTRIES128_S) & \
     PCIE_FW_PF_DEVLOG_NENTRIES128_M)

#define PCIE_FW_PF_DEVLOG_ADDR16_S 4
#define PCIE_FW_PF_DEVLOG_ADDR16_M 0xffffff
#define PCIE_FW_PF_DEVLOG_ADDR16_V(x) ((x) << PCIE_FW_PF_DEVLOG_ADDR16_S)
#define PCIE_FW_PF_DEVLOG_ADDR16_G(x) \
    (((x) >> PCIE_FW_PF_DEVLOG_ADDR16_S) & PCIE_FW_PF_DEVLOG_ADDR16_M)

#define PCIE_FW_PF_DEVLOG_MEMTYPE_S 0
#define PCIE_FW_PF_DEVLOG_MEMTYPE_M 0xf
#define PCIE_FW_PF_DEVLOG_MEMTYPE_V(x) ((x) << PCIE_FW_PF_DEVLOG_MEMTYPE_S)
#define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \
    (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M)

#endif /* _T4FW_INTERFACE_H_ */

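A worked decode of that register layout may help: nentries128 lives in bits 31:28, the 16-byte-unit address in bits 27:4, and the memory type in bits 3:0, with the entry count being (nentries128 + 1) * 128. A compilable sketch with a made-up register value (the field accessors mirror the _G macros above; the value 0x30000a02 is purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* accessors mirroring the PCIE_FW_PF_DEVLOG_*_G macros */
#define NENTRIES128_G(x) (((x) >> 28) & 0xf)
#define ADDR16_G(x)      (((x) >> 4) & 0xffffff)
#define MEMTYPE_G(x)     ((x) & 0xf)

int main(void)
{
    uint32_t reg = 0x30000a02; /* hypothetical register value */
    unsigned int nentries = (NENTRIES128_G(reg) + 1) * 128;

    /* nentries128 = 3 -> (3 + 1) * 128 = 512 entries;
     * memtype = 2; start = 0xa0 << 4 = 0xa00 */
    printf("memtype %u, start 0x%x, %u entries\n",
           (unsigned int)MEMTYPE_G(reg),
           (unsigned int)(ADDR16_G(reg) << 4), nentries);
    return 0;
}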
@@ -36,13 +36,13 @@
#define __T4FW_VERSION_H__

#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x0C
#define T4FW_VERSION_MICRO 0x19
#define T4FW_VERSION_MINOR 0x0D
#define T4FW_VERSION_MICRO 0x20
#define T4FW_VERSION_BUILD 0x00

#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x0C
#define T5FW_VERSION_MICRO 0x19
#define T5FW_VERSION_MINOR 0x0D
#define T5FW_VERSION_MICRO 0x20
#define T5FW_VERSION_BUILD 0x00

#endif

@@ -1004,7 +1004,7 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
                     ? (tq->pidx - 1)
                     : (tq->size - 1));
        __be64 *src = (__be64 *)&tq->desc[index];
        __be64 __iomem *dst = (__be64 *)(tq->bar2_addr +
        __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
                                                 SGE_UDB_WCDOORBELL);
        unsigned int count = EQ_UNIT / sizeof(__be64);

@@ -1018,7 +1018,11 @@ static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
         * DMA.
         */
        while (count) {
            writeq(*src, dst);
            /* the (__force u64) is because the compiler
             * doesn't understand the endian swizzling
             * going on
             */
            writeq((__force u64)*src, dst);
            src++;
            dst++;
            count--;

@@ -1252,8 +1256,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
    BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
    wr = (void *)&txq->q.desc[txq->q.pidx];
    wr->equiq_to_len16 = cpu_to_be32(wr_mid);
    wr->r3[0] = cpu_to_be64(0);
    wr->r3[1] = cpu_to_be64(0);
    wr->r3[0] = cpu_to_be32(0);
    wr->r3[1] = cpu_to_be32(0);
    skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
    end = (u64 *)wr + flits;

@@ -210,10 +210,10 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,

    if (rpl) {
        /* request bit in high-order BE word */
        WARN_ON((be32_to_cpu(*(const u32 *)cmd)
        WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
                 & FW_CMD_REQUEST_F) == 0);
        get_mbox_rpl(adapter, rpl, size, mbox_data);
        WARN_ON((be32_to_cpu(*(u32 *)rpl)
        WARN_ON((be32_to_cpu(*(__be32 *)rpl)
                 & FW_CMD_REQUEST_F) != 0);
    }
    t4_write_reg(adapter, mbox_ctl,

@@ -484,7 +484,7 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
     *  o The BAR2 Queue ID.
     *  o The BAR2 Queue ID Offset into the BAR2 page.
     */
    bar2_page_offset = ((qid >> qpp_shift) << page_shift);
    bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
    bar2_qid = qid & qpp_mask;
    bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

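The (u64) cast in the bar2_page_offset line matters because both operands are 32-bit: the shift is then performed at 32-bit width and the high bits are lost before the result is widened for the assignment. A sketch of the wrap, with hypothetical values chosen so the 32-bit shift visibly overflows:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t qid = 0x200000, qpp_shift = 0, page_shift = 12;

    /* shift happens in 32 bits, wraps to 0, and only then widens... */
    uint64_t wrong = (qid >> qpp_shift) << page_shift;
    /* ...so widen first, as the fixed line above does */
    uint64_t right = (uint64_t)(qid >> qpp_shift) << page_shift;

    printf("wrong = 0x%llx, right = 0x%llx\n",
           (unsigned long long)wrong, (unsigned long long)right);
    return 0;
}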
@@ -1954,6 +1954,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
    struct fec_enet_private *fep = netdev_priv(ndev);
    struct device_node *node;
    int err = -ENXIO, i;
    u32 mii_speed, holdtime;

    /*
     * The i.MX28 dual fec interfaces are not equal.

@@ -1991,10 +1992,33 @@ static int fec_enet_mii_init(struct platform_device *pdev)
     * Reference Manual has an error on this, and gets fixed on i.MX6Q
     * document.
     */
    fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
    mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
    if (fep->quirks & FEC_QUIRK_ENET_MAC)
        fep->phy_speed--;
    fep->phy_speed <<= 1;
        mii_speed--;
    if (mii_speed > 63) {
        dev_err(&pdev->dev,
            "fec clock (%lu) too fast to get right mii speed\n",
            clk_get_rate(fep->clk_ipg));
        err = -EINVAL;
        goto err_out;
    }

    /*
     * The i.MX28 and i.MX6 types have another field in the MSCR (aka
     * MII_SPEED) register that defines the MDIO output hold time. Earlier
     * versions are RAZ there, so just ignore the difference and write the
     * register always.
     * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
     * HOLDTIME + 1 is the number of clk cycles the fec is holding the
     * output.
     * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
     * Given that ceil(clkrate / 5000000) <= 64, the calculation for
     * holdtime cannot result in a value greater than 3.
     */
    holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;

    fep->phy_speed = mii_speed << 1 | holdtime << 8;

    writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

    fep->mii_bus = mdiobus_alloc();

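A worked instance of the MSCR arithmetic above may help; note the "minus one" step only applies on parts with the FEC_QUIRK_ENET_MAC quirk, which this sketch assumes. With a hypothetical 66 MHz ipg clock (all values illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long clk_rate = 66000000; /* hypothetical 66 MHz ipg clock */

    /* MDC must not exceed 2.5 MHz: ceil(66e6 / 5e6) = 14, and the
     * register field holds divider - 1 on ENET_MAC parts, so 13 */
    unsigned int mii_speed = DIV_ROUND_UP(clk_rate, 5000000) - 1;

    /* >= 10 ns MDIO hold time: ceil(66e6 / 1e8) - 1 = 0 extra cycles */
    unsigned int holdtime = DIV_ROUND_UP(clk_rate, 100000000) - 1;

    unsigned int mscr = mii_speed << 1 | holdtime << 8;
    printf("MSCR = 0x%x\n", mscr); /* 13 << 1 = 0x1a */
    return 0;
}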
@@ -3893,6 +3893,9 @@ static int ucc_geth_probe(struct platform_device* ofdev)
    ugeth->phy_interface = phy_interface;
    ugeth->max_speed = max_speed;

    /* Carrier starts down, phylib will bring it up */
    netif_carrier_off(dev);

    err = register_netdev(dev);
    if (err) {
        if (netif_msg_probe(ugeth))

@@ -2002,7 +2002,6 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
            goto reset_slave;
        slave_state[slave].vhcr_dma = ((u64) param) << 48;
        priv->mfunc.master.slave_state[slave].cookie = 0;
        mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
        break;
    case MLX4_COMM_CMD_VHCR1:
        if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)

@@ -2234,6 +2233,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
        for (i = 0; i < dev->num_slaves; ++i) {
            s_state = &priv->mfunc.master.slave_state[i];
            s_state->last_cmd = MLX4_COMM_CMD_RESET;
            mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
            for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
                s_state->event_eq[j].eqn = -1;
            __raw_writel((__force u32) 0,

@@ -2917,13 +2917,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
    netif_carrier_off(dev);
    mlx4_en_set_default_moderation(priv);

    err = register_netdev(dev);
    if (err) {
        en_err(priv, "Netdev registration failed for port %d\n", port);
        goto out;
    }
    priv->registered = 1;

    en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
    en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

@@ -2969,6 +2962,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
         mdev->profile.prof[priv->port].tx_ppp,
         mdev->profile.prof[priv->port].tx_pause);

    err = register_netdev(dev);
    if (err) {
        en_err(priv, "Netdev registration failed for port %d\n", port);
        goto out;
    }

    priv->registered = 1;

    return 0;

out:

@@ -153,12 +153,10 @@ void mlx4_gen_slave_eqe(struct work_struct *work)

        /* All active slaves need to receive the event */
        if (slave == ALL_SLAVES) {
            for (i = 0; i < dev->num_slaves; i++) {
                if (i != dev->caps.function &&
                    master->slave_state[i].active)
                    if (mlx4_GEN_EQE(dev, i, eqe))
                        mlx4_warn(dev, "Failed to generate event for slave %d\n",
                                  i);
            for (i = 0; i <= dev->persist->num_vfs; i++) {
                if (mlx4_GEN_EQE(dev, i, eqe))
                    mlx4_warn(dev, "Failed to generate event for slave %d\n",
                              i);
            }
        } else {
            if (mlx4_GEN_EQE(dev, slave, eqe))

@@ -203,13 +201,11 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
                             struct mlx4_eqe *eqe)
{
    struct mlx4_priv *priv = mlx4_priv(dev);
    struct mlx4_slave_state *s_slave =
        &priv->mfunc.master.slave_state[slave];

    if (!s_slave->active) {
        /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
    if (slave < 0 || slave > dev->persist->num_vfs ||
        slave == dev->caps.function ||
        !priv->mfunc.master.slave_state[slave].active)
        return;
    }

    slave_event(dev, slave, eqe);
}

Some files were not shown because too many files have changed in this diff.