Merge remote-tracking branch 'tip/x86/urgent' into efi-for-mingo

Conflicts:
	arch/x86/include/asm/efi.h
Matt Fleming 2014-03-05 17:22:57 +00:00
commit 4fd69331ad
534 changed files with 5402 additions and 3358 deletions


@ -3,8 +3,7 @@ Date: Nov 2010
Contact: Kay Sievers <kay.sievers@vrfy.org>
Description:
Shows the list of currently configured
tty devices used for the console,
like 'tty1 ttyS0'.
console devices, like 'tty1 ttyS0'.
The last entry in the file is the active
device connected to /dev/console.
The file supports poll() to detect virtual


@ -82,7 +82,19 @@ Most of the hard work is done for the driver in the PCI layer. It simply
has to request that the PCI layer set up the MSI capability for this
device.
4.2.1 pci_enable_msi_range
4.2.1 pci_enable_msi
int pci_enable_msi(struct pci_dev *dev)
A successful call allocates ONE interrupt to the device, regardless
of how many MSIs the device supports. The device is switched from
pin-based interrupt mode to MSI mode. The dev->irq number is changed
to a new number which represents the message signaled interrupt;
consequently, this function should be called before the driver calls
request_irq(), because an MSI is delivered via a vector that is
different from the vector of a pin-based interrupt.
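For illustration, a minimal sketch of that ordering for a hypothetical foo
driver (assuming the usual request_irq()/free_irq() interfaces from
<linux/interrupt.h>) might look like this:

static irqreturn_t foo_handler(int irq, void *data)
{
	/* acknowledge and handle the hypothetical device here */
	return IRQ_HANDLED;
}

static int foo_driver_setup_irq(struct pci_dev *pdev, void *foo_ctx)
{
	int err;

	/* Switch dev->irq from the pin-based vector to the MSI vector first */
	err = pci_enable_msi(pdev);
	if (err)
		return err;

	/* Only now does pdev->irq refer to the MSI vector */
	err = request_irq(pdev->irq, foo_handler, 0, "foo", foo_ctx);
	if (err)
		pci_disable_msi(pdev);

	return err;
}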
4.2.2 pci_enable_msi_range
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
@ -147,6 +159,11 @@ static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
return pci_enable_msi_range(pdev, nvec, nvec);
}
Note, unlike the pci_enable_msi_exact() function, which can also be used to
enable a particular number of MSI interrupts, pci_enable_msi_range()
returns either a negative errno or 'nvec' (not a negative errno or 0, as
pci_enable_msi_exact() does).
4.2.1.3 Single MSI mode
The most notorious example of the request type described above is
@ -158,7 +175,27 @@ static int foo_driver_enable_single_msi(struct pci_dev *pdev)
return pci_enable_msi_range(pdev, 1, 1);
}
4.2.2 pci_disable_msi
Note, unlike the pci_enable_msi() function, which can also be used to
enable single MSI mode, pci_enable_msi_range() returns either a
negative errno or 1 (not a negative errno or 0, as pci_enable_msi()
does).
4.2.3 pci_enable_msi_exact
int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
This variation on the pci_enable_msi_range() call allows a device driver to
request exactly 'nvec' MSIs.
If this function returns a negative number, it indicates an error and
the driver should not attempt to request any more MSI interrupts for
this device.
By contrast with the pci_enable_msi_range() function, pci_enable_msi_exact()
returns zero on success, which indicates that MSI interrupts have been
successfully allocated.
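For illustration, a hypothetical driver that can only operate with exactly
four MSI vectors (FOO_DRIVER_NVEC is an assumed constant) might use it like
this:

#define FOO_DRIVER_NVEC	4

static int foo_driver_enable_msi(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_msi_exact(pdev, FOO_DRIVER_NVEC);
	if (rc < 0)
		return rc;	/* any error is fatal; do not retry */

	return 0;		/* exactly FOO_DRIVER_NVEC vectors were allocated */
}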
4.2.4 pci_disable_msi
void pci_disable_msi(struct pci_dev *dev)
@ -172,7 +209,7 @@ on any interrupt for which it previously called request_irq().
Failure to do so results in a BUG_ON(), leaving the device with
MSI enabled and thus leaking its vector.
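For illustration, the matching teardown order for the hypothetical single-MSI
setup sketched earlier would be:

static void foo_driver_teardown_irq(struct pci_dev *pdev, void *foo_ctx)
{
	/* Release the MSI vector first ... */
	free_irq(pdev->irq, foo_ctx);

	/* ... then switch the device back to pin-based interrupt mode */
	pci_disable_msi(pdev);
}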
4.2.3 pci_msi_vec_count
4.2.4 pci_msi_vec_count
int pci_msi_vec_count(struct pci_dev *dev)
@ -257,8 +294,8 @@ possible, likely up to the limit returned by pci_msix_vec_count() function:
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
1, nvec);
return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1, nvec);
}
Note the value of 'minvec' parameter is 1. As 'minvec' is inclusive,
@ -269,8 +306,8 @@ In this case the function could look like this:
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
FOO_DRIVER_MINIMUM_NVEC, nvec);
return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
FOO_DRIVER_MINIMUM_NVEC, nvec);
}
4.3.1.2 Exact number of MSI-X interrupts
@ -282,10 +319,15 @@ parameters:
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
nvec, nvec);
return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
nvec, nvec);
}
Note, unlike the pci_enable_msix_exact() function, which can also be used to
enable a particular number of MSI-X interrupts, pci_enable_msix_range()
returns either a negative errno or 'nvec' (not a negative errno or 0, as
pci_enable_msix_exact() does).
4.3.1.3 Specific requirements to the number of MSI-X interrupts
As noted above, there could be devices that cannot operate with just any
@ -332,7 +374,64 @@ Note how the pci_enable_msix_range() return value is analyzed for a fallback -
any error code other than -ENOSPC indicates a fatal error and should not
be retried.
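The fallback example referred to here sits above this hunk and is not shown;
as a rough sketch only, assuming the same hypothetical foo_adapter used in
the other examples and a power-of-two constraint on the vector count, such a
loop could look like this:

static int foo_driver_enable_msix(struct foo_adapter *adapter,
				  int minvec, int maxvec)
{
	int rc;

	minvec = roundup_pow_of_two(minvec);
	maxvec = rounddown_pow_of_two(maxvec);

	if (minvec > maxvec)
		return -ERANGE;

retry:
	rc = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				   maxvec, maxvec);
	if (rc == -ENOSPC) {
		if (maxvec == 1)
			return -ENOSPC;

		maxvec /= 2;

		if (minvec > maxvec)
			return -ENOSPC;

		goto retry;
	}

	return rc;	/* negative errno, or the number of vectors allocated */
}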
4.3.2 pci_disable_msix
4.3.2 pci_enable_msix_exact
int pci_enable_msix_exact(struct pci_dev *dev,
struct msix_entry *entries, int nvec)
This variation on the pci_enable_msix_range() call allows a device driver to
request exactly 'nvec' MSI-X interrupts.
If this function returns a negative number, it indicates an error and
the driver should not attempt to allocate any more MSI-X interrupts for
this device.
By contrast with the pci_enable_msix_range() function, pci_enable_msix_exact()
returns zero on success, which indicates that MSI-X interrupts have been
successfully allocated.
Another version of a routine that enables MSI-X mode for a device with
specific requirements described in chapter 4.3.1.3 might look like this:
/*
* Assume 'minvec' and 'maxvec' are non-zero
*/
static int foo_driver_enable_msix(struct foo_adapter *adapter,
int minvec, int maxvec)
{
int rc;
minvec = roundup_pow_of_two(minvec);
maxvec = rounddown_pow_of_two(maxvec);
if (minvec > maxvec)
return -ERANGE;
retry:
rc = pci_enable_msix_exact(adapter->pdev,
adapter->msix_entries, maxvec);
/*
* -ENOSPC is the only error code allowed to be analyzed
*/
if (rc == -ENOSPC) {
if (maxvec == 1)
return -ENOSPC;
maxvec /= 2;
if (minvec > maxvec)
return -ENOSPC;
goto retry;
} else if (rc < 0) {
return rc;
}
return maxvec;
}
4.3.3 pci_disable_msix
void pci_disable_msix(struct pci_dev *dev)


@ -91,7 +91,7 @@ Boards:
compatible = "ti,omap3-beagle", "ti,omap3"
- OMAP3 Tobi with Overo : Commercial expansion board with daughter board
compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3"
compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3"
- OMAP4 SDP : Software Development Board
compatible = "ti,omap4-sdp", "ti,omap4430"


@ -1,12 +1,16 @@
* Freescale Smart Direct Memory Access (SDMA) Controller for i.MX
Required properties:
- compatible : Should be "fsl,imx31-sdma", "fsl,imx31-to1-sdma",
"fsl,imx31-to2-sdma", "fsl,imx35-sdma", "fsl,imx35-to1-sdma",
"fsl,imx35-to2-sdma", "fsl,imx51-sdma", "fsl,imx53-sdma" or
"fsl,imx6q-sdma". The -to variants should be preferred since they
allow to determnine the correct ROM script addresses needed for
the driver to work without additional firmware.
- compatible : Should be one of
"fsl,imx25-sdma"
"fsl,imx31-sdma", "fsl,imx31-to1-sdma", "fsl,imx31-to2-sdma"
"fsl,imx35-sdma", "fsl,imx35-to1-sdma", "fsl,imx35-to2-sdma"
"fsl,imx51-sdma"
"fsl,imx53-sdma"
"fsl,imx6q-sdma"
The -to variants should be preferred since they allow determining the
correct ROM script addresses needed for the driver to work without additional
firmware.
- reg : Should contain SDMA registers location and length
- interrupts : Should contain SDMA interrupt
- #dma-cells : Must be <3>.


@ -0,0 +1,58 @@
STMicroelectronics SoC DWMAC glue layer controller
The device node has the following properties.
Required properties:
- compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac" or
"st,stid127-dwmac".
- reg : Offset of the glue configuration register map in system
configuration regmap pointed by st,syscon property and size.
- reg-names : Should be "sti-ethconf".
- st,syscon : Should be a phandle to the system configuration node which
encompasses these glue registers.
- st,tx-retime-src: On STi parts, for Gigabit speeds, the 125MHz clock can be
wired in from different sources: one via the TXCLK pin and the other via the
CLK_125 pin. This wiring is entirely board dependent; however, the retiming
glue logic should be configured accordingly. Possible values for this property:
"txclk" - if the 125MHz clock is wired up via the txclk line.
"clk_125" - if the 125MHz clock is wired up via the clk_125 line.
This property is only valid for Gigabit setups (GMII, RGMII), and it is
unused for non-Gigabit (MII and RMII) setups. Also note that the internal
clockgen cannot generate a stable 125MHz clock.
- st,ext-phyclk: This boolean property indicates which side generates the TX
and RX clock. It is only valid for the RMII case, where the clock can be
generated by either the MAC or the PHY.
- clock-names: should be "sti-ethclk".
- clocks: Should point to ethernet clockgen which can generate phyclk.
Example:
ethernet0: dwmac@fe810000 {
device_type = "network";
compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
reg = <0xfe810000 0x8000>, <0x8bc 0x4>;
reg-names = "stmmaceth", "sti-ethconf";
interrupts = <0 133 0>, <0 134 0>, <0 135 0>;
interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
phy-mode = "mii";
st,syscon = <&syscfg_rear>;
snps,pbl = <32>;
snps,mixed-burst;
resets = <&softreset STIH416_ETH0_SOFTRESET>;
reset-names = "stmmaceth";
pinctrl-0 = <&pinctrl_mii0>;
pinctrl-names = "default";
clocks = <&CLK_S_GMAC0_PHY>;
clock-names = "stmmaceth";
};


@ -1,45 +0,0 @@
The 3Com Etherlink Plus (3c505) driver.
This driver now uses DMA. There is currently no support for PIO operation.
The default DMA channel is 6; this is _not_ autoprobed, so you must
make sure you configure it correctly. If loading the driver as a
module, you can do this with "modprobe 3c505 dma=n". If the driver is
linked statically into the kernel, you must either use an "ether="
statement on the command line, or change the definition of ELP_DMA in 3c505.h.
The driver will warn you if it has to fall back on the compiled in
default DMA channel.
If no base address is given at boot time, the driver will autoprobe
ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver
will try to probe for it.
The driver can be used as a loadable module.
Theoretically, one instance of the driver can now run multiple cards,
in the standard way (when loading a module, say "modprobe 3c505
io=0x300,0x340 irq=10,11 dma=6,7" or whatever). I have not tested
this, though.
The driver may now support revision 2 hardware; the dependency on
being able to read the host control register has been removed. This
is also untested, since I don't have a suitable card.
Known problems:
I still see "DMA upload timed out" messages from time to time. These
seem to be fairly non-fatal though.
The card is old and slow.
To do:
Improve probe/setup code
Test multicast and promiscuous operation
Authors:
The driver is mainly written by Craig Southeren, email
<craigs@ineluki.apana.org.au>.
Parts of the driver (adapting the driver to 1.1.4+ kernels,
IRQ/address detection, some changes) and this README by
Juha Laiho <jlaiho@ichaos.nullnet.fi>.
DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk>
Multicard support, Software configurable DMA, etc., by
Christopher Collins <ccollins@pcug.org.au>


@ -538,7 +538,7 @@ F: arch/alpha/
ALTERA UART/JTAG UART SERIAL DRIVERS
M: Tobias Klauser <tklauser@distanz.ch>
L: linux-serial@vger.kernel.org
L: nios2-dev@sopc.et.ntust.edu.tw (moderated for non-subscribers)
L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
S: Maintained
F: drivers/tty/serial/altera_uart.c
F: drivers/tty/serial/altera_jtaguart.c
@ -1860,6 +1860,7 @@ F: drivers/net/ethernet/broadcom/bnx2x/
BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
M: Christian Daudt <bcm@fixthebug.org>
M: Matt Porter <mporter@linaro.org>
L: bcm-kernel-feedback-list@broadcom.com
T: git git://git.github.com/broadcom/bcm11351
S: Maintained
@ -2408,8 +2409,10 @@ F: tools/power/cpupower/
CPUSETS
M: Li Zefan <lizefan@huawei.com>
L: cgroups@vger.kernel.org
W: http://www.bullopensource.org/cpuset/
W: http://oss.sgi.com/projects/cpusets/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
S: Maintained
F: Documentation/cgroups/cpusets.txt
F: include/linux/cpuset.h
@ -2608,9 +2611,9 @@ DC395x SCSI driver
M: Oliver Neukum <oliver@neukum.org>
M: Ali Akcaagac <aliakc@web.de>
M: Jamie Lenehan <lenehan@twibble.org>
W: http://twibble.org/dist/dc395x/
L: dc395x@twibble.org
L: http://lists.twibble.org/mailman/listinfo/dc395x/
W: http://twibble.org/dist/dc395x/
W: http://lists.twibble.org/mailman/listinfo/dc395x/
S: Maintained
F: Documentation/scsi/dc395x.txt
F: drivers/scsi/dc395x.*
@ -2845,12 +2848,22 @@ F: lib/kobj*
DRM DRIVERS
M: David Airlie <airlied@linux.ie>
L: dri-devel@lists.freedesktop.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
T: git git://people.freedesktop.org/~airlied/linux
S: Maintained
F: drivers/gpu/drm/
F: include/drm/
F: include/uapi/drm/
RADEON DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~agd5f/linux
S: Supported
F: drivers/gpu/drm/radeon/
F: include/drm/radeon*
F: include/uapi/drm/radeon*
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@ffwll.ch>
M: Jani Nikula <jani.nikula@linux.intel.com>
@ -3324,6 +3337,17 @@ S: Maintained
F: include/linux/netfilter_bridge/
F: net/bridge/
ETHERNET PHY LIBRARY
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: include/linux/phy.h
F: include/linux/phy_fixed.h
F: drivers/net/phy/
F: Documentation/networking/phy.txt
F: drivers/of/of_mdio.c
F: drivers/of/of_net.c
EXT2 FILE SYSTEM
M: Jan Kara <jack@suse.cz>
L: linux-ext4@vger.kernel.org
@ -5487,6 +5511,11 @@ W: http://www.kernel.org/doc/man-pages
L: linux-man@vger.kernel.org
S: Maintained
MARVELL ARMADA DRM SUPPORT
M: Russell King <rmk+kernel@arm.linux.org.uk>
S: Maintained
F: drivers/gpu/drm/armada/
MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
M: Mirko Lindner <mlindner@marvell.com>
M: Stephen Hemminger <stephen@networkplumber.org>
@ -8429,8 +8458,8 @@ TARGET SUBSYSTEM
M: Nicholas A. Bellinger <nab@linux-iscsi.org>
L: linux-scsi@vger.kernel.org
L: target-devel@vger.kernel.org
L: http://groups.google.com/group/linux-iscsi-target-dev
W: http://www.linux-iscsi.org
W: http://groups.google.com/group/linux-iscsi-target-dev
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
S: Supported
F: drivers/target/
@ -9715,7 +9744,6 @@ F: drivers/xen/*swiotlb*
XFS FILESYSTEM
P: Silicon Graphics Inc
M: Dave Chinner <david@fromorbit.com>
M: Ben Myers <bpm@sgi.com>
M: xfs@oss.sgi.com
L: xfs@oss.sgi.com
W: http://oss.sgi.com/projects/xfs


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc5
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
@ -605,10 +605,11 @@ endif
ifdef CONFIG_CC_STACKPROTECTOR_REGULAR
stackp-flag := -fstack-protector
ifeq ($(call cc-option, $(stackp-flag)),)
$(warning Cannot use CONFIG_CC_STACKPROTECTOR: \
-fstack-protector not supported by compiler))
$(warning Cannot use CONFIG_CC_STACKPROTECTOR_REGULAR: \
-fstack-protector not supported by compiler)
endif
else ifdef CONFIG_CC_STACKPROTECTOR_STRONG
else
ifdef CONFIG_CC_STACKPROTECTOR_STRONG
stackp-flag := -fstack-protector-strong
ifeq ($(call cc-option, $(stackp-flag)),)
$(warning Cannot use CONFIG_CC_STACKPROTECTOR_STRONG: \
@ -618,6 +619,7 @@ else
# Force off for distro compilers that enable stack protector by default.
stackp-flag := $(call cc-option, -fno-stack-protector)
endif
endif
KBUILD_CFLAGS += $(stackp-flag)
# This warning generated too much noise in a regular build.


@ -209,7 +209,8 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
omap3-n900.dtb \
omap3-n9.dtb \
omap3-n950.dtb \
omap3-tobi.dtb \
omap3-overo-tobi.dtb \
omap3-overo-storm-tobi.dtb \
omap3-gta04.dtb \
omap3-igep0020.dtb \
omap3-igep0030.dtb \


@ -121,7 +121,7 @@ sound {
ti,model = "AM335x-EVMSK";
ti,audio-codec = <&tlv320aic3106>;
ti,mcasp-controller = <&mcasp1>;
ti,codec-clock-rate = <24576000>;
ti,codec-clock-rate = <24000000>;
ti,audio-routing =
"Headphone Jack", "HPLOUT",
"Headphone Jack", "HPROUT";
@ -256,6 +256,12 @@ davinci_mdio_sleep: davinci_mdio_sleep {
>;
};
mmc1_pins: pinmux_mmc1_pins {
pinctrl-single,pins = <
0x160 (PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
>;
};
mcasp1_pins: mcasp1_pins {
pinctrl-single,pins = <
0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
@ -456,6 +462,9 @@ &mmc1 {
status = "okay";
vmmc-supply = <&vmmc_reg>;
bus-width = <4>;
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
};
&sham {


@ -23,6 +23,7 @@ aliases {
gpio0 = &gpio0;
gpio1 = &gpio1;
gpio2 = &gpio2;
eth3 = &eth3;
};
cpus {
@ -291,7 +292,7 @@ gpio2: gpio@18180 {
interrupts = <91>;
};
ethernet@34000 {
eth3: ethernet@34000 {
compatible = "marvell,armada-370-neta";
reg = <0x34000 0x4000>;
interrupts = <14>;


@ -379,15 +379,6 @@ gate_clk: clock-gating-ctrl@d0038 {
#clock-cells = <1>;
};
pmu_intc: pmu-interrupt-ctrl@d0050 {
compatible = "marvell,dove-pmu-intc";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0xd0050 0x8>;
interrupts = <33>;
marvell,#interrupts = <7>;
};
pinctrl: pin-ctrl@d0200 {
compatible = "marvell,dove-pinctrl";
reg = <0xd0200 0x10>;
@ -610,8 +601,6 @@ gpio1: gpio-ctrl@d0420 {
rtc: real-time-clock@d8500 {
compatible = "marvell,orion-rtc";
reg = <0xd8500 0x20>;
interrupt-parent = <&pmu_intc>;
interrupts = <5>;
};
gpio2: gpio-ctrl@e8400 {


@ -52,12 +52,6 @@ reg_usbotg_vbus: usb-otg-vbus {
};
};
codec: spdif-transmitter {
compatible = "linux,spdif-dit";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hummingboard_spdif>;
};
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
@ -111,7 +105,7 @@ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
};
pinctrl_hummingboard_spdif: hummingboard-spdif {
fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>;
fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
};
pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
@ -142,6 +136,8 @@ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
};
&spdif {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hummingboard_spdif>;
status = "okay";
};


@ -46,12 +46,6 @@ reg_usbotg_vbus: usb-otg-vbus {
};
};
codec: spdif-transmitter {
compatible = "linux,spdif-dit";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_cubox_i_spdif>;
};
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
@ -89,7 +83,7 @@ MX6QDL_PAD_EIM_DA9__GPIO3_IO09 0x80000000
};
pinctrl_cubox_i_spdif: cubox-i-spdif {
fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>;
fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
};
pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
@ -121,6 +115,8 @@ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059
};
&spdif {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_cubox_i_spdif>;
status = "okay";
};


@ -32,7 +32,7 @@ gpio-keys {
aux-button {
label = "aux";
linux,code = <169>;
gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
gpio-key,wakeup;
};
};
@ -92,6 +92,8 @@ &i2c2 {
bmp085@77 {
compatible = "bosch,bmp085";
reg = <0x77>;
interrupt-parent = <&gpio4>;
interrupts = <17 IRQ_TYPE_EDGE_RISING>;
};
/* leds */
@ -141,8 +143,8 @@ &mmc1 {
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
vmmc-supply = <&vmmc1>;
vmmc_aux-supply = <&vsim>;
bus-width = <4>;
ti,non-removable;
};
&mmc2 {


@ -14,5 +14,5 @@
/ {
model = "Nokia N9";
compatible = "nokia,omap3-n9", "ti,omap3";
compatible = "nokia,omap3-n9", "ti,omap36xx", "ti,omap3";
};


@ -1,6 +1,6 @@
/*
* Copyright (C) 2013 Pavel Machek <pavel@ucw.cz>
* Copyright 2013 Aaro Koskinen <aaro.koskinen@iki.fi>
* Copyright (C) 2013-2014 Aaro Koskinen <aaro.koskinen@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 (or later) as
@ -13,7 +13,7 @@
/ {
model = "Nokia N900";
compatible = "nokia,omap3-n900", "ti,omap3";
compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3";
cpus {
cpu@0 {


@ -14,5 +14,5 @@
/ {
model = "Nokia N950";
compatible = "nokia,omap3-n950", "ti,omap3";
compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3";
};


@ -0,0 +1,22 @@
/*
* Copyright (C) 2012 Florian Vaussard, EPFL Mobots group
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* Tobi expansion board is manufactured by Gumstix Inc.
*/
/dts-v1/;
#include "omap36xx.dtsi"
#include "omap3-overo-tobi-common.dtsi"
/ {
model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Tobi";
compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
};


@ -13,9 +13,6 @@
#include "omap3-overo.dtsi"
/ {
model = "TI OMAP3 Gumstix Overo on Tobi";
compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3";
leds {
compatible = "gpio-leds";
heartbeat {


@ -0,0 +1,22 @@
/*
* Copyright (C) 2012 Florian Vaussard, EPFL Mobots group
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* Tobi expansion board is manufactured by Gumstix Inc.
*/
/dts-v1/;
#include "omap34xx.dtsi"
#include "omap3-overo-tobi-common.dtsi"
/ {
model = "OMAP35xx Gumstix Overo on Tobi";
compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3";
};


@ -9,9 +9,6 @@
/*
* The Gumstix Overo must be combined with an expansion board.
*/
/dts-v1/;
#include "omap34xx.dtsi"
/ {
pwmleds {


@ -57,6 +57,8 @@ dc@54200000 {
resets = <&tegra_car 27>;
reset-names = "dc";
nvidia,head = <0>;
rgb {
status = "disabled";
};
@ -72,6 +74,8 @@ dc@54240000 {
resets = <&tegra_car 26>;
reset-names = "dc";
nvidia,head = <1>;
rgb {
status = "disabled";
};


@ -94,6 +94,8 @@ dc@54200000 {
resets = <&tegra_car 27>;
reset-names = "dc";
nvidia,head = <0>;
rgb {
status = "disabled";
};
@ -109,6 +111,8 @@ dc@54240000 {
resets = <&tegra_car 26>;
reset-names = "dc";
nvidia,head = <1>;
rgb {
status = "disabled";
};


@ -28,7 +28,7 @@ / {
compatible = "nvidia,cardhu", "nvidia,tegra30";
aliases {
rtc0 = "/i2c@7000d000/tps6586x@34";
rtc0 = "/i2c@7000d000/tps65911@2d";
rtc1 = "/rtc@7000e000";
};


@ -170,6 +170,8 @@ dc@54200000 {
resets = <&tegra_car 27>;
reset-names = "dc";
nvidia,head = <0>;
rgb {
status = "disabled";
};
@ -185,6 +187,8 @@ dc@54240000 {
resets = <&tegra_car 26>;
reset-names = "dc";
nvidia,head = <1>;
rgb {
status = "disabled";
};


@ -1,2 +0,0 @@
/include/ "tests-phandle.dtsi"
/include/ "tests-interrupts.dtsi"


@ -1,4 +1,4 @@
/include/ "versatile-ab.dts"
#include <versatile-ab.dts>
/ {
model = "ARM Versatile PB";
@ -47,4 +47,4 @@ mmc@b000 {
};
};
/include/ "testcases/tests.dtsi"
#include <testcases.dtsi>


@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
dsb();
}
/*


@ -120,13 +120,16 @@
/*
* 2nd stage PTE definitions for LPAE.
*/
#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
/*
* Hyp-mode PL2 PTE definitions for LPAE.


@ -37,18 +37,9 @@
static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
__asm__ __volatile__ (
"dsb ishst\n"
SEV
);
#else
__asm__ __volatile__ (
"mcr p15, 0, %0, c7, c10, 4\n"
SEV
: : "r" (0)
);
#endif
dsb(ishst);
__asm__(SEV);
}
/*


@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
kernel_data.end = virt_to_phys(_end - 1);
for_each_memblock(memory, region) {
res = memblock_virt_alloc_low(sizeof(*res), 0);
res = memblock_virt_alloc(sizeof(*res), 0);
res->name = "System RAM";
res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;


@ -878,7 +878,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd,
void *v)
{
if (cmd == CPU_PM_EXIT) {
if (cmd == CPU_PM_EXIT &&
__hyp_get_vectors() == hyp_default_vectors) {
cpu_init_hyp_mode(NULL);
return NOTIFY_OK;
}


@ -220,6 +220,10 @@ after_vfp_restore:
* in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
* passed in r0 and r1.
*
* A function pointer with a value of 0xffffffff has a special meaning,
* and is used to implement __hyp_get_vectors in the same way as in
* arch/arm/kernel/hyp_stub.S.
*
* The calling convention follows the standard AAPCS:
* r0 - r3: caller save
* r12: caller save
@ -363,6 +367,11 @@ hyp_hvc:
host_switch_to_hyp:
pop {r0, r1, r2}
/* Check for __hyp_get_vectors */
cmp r0, #-1
mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
beq 1f
push {lr}
mrs lr, SPSR
push {lr}
@ -378,7 +387,7 @@ THUMB( orr lr, #1)
pop {lr}
msr SPSR_csxf, lr
pop {lr}
eret
1: eret
guest_trap:
load_vcpu @ Load VCPU pointer to r0


@ -101,11 +101,9 @@ obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o
ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
# i.MX6SL reuses i.MX6Q code
obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o
endif
# i.MX5 based machines
obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o


@ -144,13 +144,11 @@ void imx6q_set_chicken_bit(void);
void imx_cpu_die(unsigned int cpu);
int imx_cpu_kill(unsigned int cpu);
#ifdef CONFIG_PM
void imx6q_pm_init(void);
void imx6q_pm_set_ccm_base(void __iomem *base);
#ifdef CONFIG_PM
void imx5_pm_init(void);
#else
static inline void imx6q_pm_init(void) {}
static inline void imx6q_pm_set_ccm_base(void __iomem *base) {}
static inline void imx5_pm_init(void) {}
#endif


@ -156,6 +156,7 @@ static struct omap_usb_config nokia770_usb_config __initdata = {
.register_dev = 1,
.hmc_mode = 16,
.pins[0] = 6,
.extcon = "tahvo-usb",
};
#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)


@ -50,6 +50,7 @@ config SOC_OMAP5
bool "TI OMAP5"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
select ARCH_HAS_OPP
select ARM_CPU_SUSPEND if PM
select ARM_GIC
select CPU_V7
@ -63,6 +64,7 @@ config SOC_AM33XX
bool "TI AM33XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
select ARCH_HAS_OPP
select ARM_CPU_SUSPEND if PM
select CPU_V7
select MULTI_IRQ_HANDLER
@ -72,6 +74,7 @@ config SOC_AM43XX
depends on ARCH_MULTI_V7
select CPU_V7
select ARCH_OMAP2PLUS
select ARCH_HAS_OPP
select MULTI_IRQ_HANDLER
select ARM_GIC
select MACH_OMAP_GENERIC
@ -80,6 +83,7 @@ config SOC_DRA7XX
bool "TI DRA7XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
select ARCH_HAS_OPP
select ARM_CPU_SUSPEND if PM
select ARM_GIC
select CPU_V7
@ -268,9 +272,6 @@ config MACH_OMAP_3430SDP
default y
select OMAP_PACKAGE_CBB
config MACH_NOKIA_N800
bool
config MACH_NOKIA_N810
bool
@ -281,7 +282,6 @@ config MACH_NOKIA_N8X0
bool "Nokia N800/N810"
depends on SOC_OMAP2420
default y
select MACH_NOKIA_N800
select MACH_NOKIA_N810
select MACH_NOKIA_N810_WIMAX
select OMAP_PACKAGE_ZAC


@ -1339,7 +1339,7 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
of_property_read_bool(np, "gpmc,time-para-granularity");
}
#ifdef CONFIG_MTD_NAND
#if IS_ENABLED(CONFIG_MTD_NAND)
static const char * const nand_xfer_types[] = {
[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
@ -1429,7 +1429,7 @@ static int gpmc_probe_nand_child(struct platform_device *pdev,
}
#endif
#ifdef CONFIG_MTD_ONENAND
#if IS_ENABLED(CONFIG_MTD_ONENAND)
static int gpmc_probe_onenand_child(struct platform_device *pdev,
struct device_node *child)
{


@ -179,15 +179,6 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
.length = L4_EMU_34XX_SIZE,
.type = MT_DEVICE
},
#if defined(CONFIG_DEBUG_LL) && \
(defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3))
{
.virtual = ZOOM_UART_VIRT,
.pfn = __phys_to_pfn(ZOOM_UART_BASE),
.length = SZ_1M,
.type = MT_DEVICE
},
#endif
};
#endif


@ -38,6 +38,7 @@
#include <linux/mtd/physmap.h>
#include <linux/usb/gpio_vbus.h>
#include <linux/reboot.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/max1586.h>
#include <linux/slab.h>
#include <linux/i2c/pxa-i2c.h>
@ -714,6 +715,10 @@ static struct gpio global_gpios[] = {
{ GPIO56_MT9M111_nOE, GPIOF_OUT_INIT_LOW, "Camera nOE" },
};
static struct regulator_consumer_supply fixed_5v0_consumers[] = {
REGULATOR_SUPPLY("power", "pwm-backlight"),
};
static void __init mioa701_machine_init(void)
{
int rc;
@ -753,6 +758,10 @@ static void __init mioa701_machine_init(void)
pxa_set_i2c_info(&i2c_pdata);
pxa27x_set_i2c_power_info(NULL);
pxa_set_camera_info(&mioa701_pxacamera_platform_data);
regulator_register_always_on(0, "fixed-5.0V", fixed_5v0_consumers,
ARRAY_SIZE(fixed_5v0_consumers),
5000000);
}
static void mioa701_machine_exit(void)


@ -24,6 +24,7 @@
#include <linux/cpu_pm.h>
#include <linux/suspend.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/clk/tegra.h>
#include <asm/smp_plat.h>


@ -73,10 +73,20 @@ u32 tegra_uart_config[3] = {
static void __init tegra_init_cache(void)
{
#ifdef CONFIG_CACHE_L2X0
static const struct of_device_id pl310_ids[] __initconst = {
{ .compatible = "arm,pl310-cache", },
{}
};
struct device_node *np;
int ret;
void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
u32 aux_ctrl, cache_type;
np = of_find_matching_node(NULL, pl310_ids);
if (!np)
return;
cache_type = readl(p + L2X0_CACHE_TYPE);
aux_ctrl = (cache_type & 0x700) << (17-8);
aux_ctrl |= 0x7C400001;


@ -1358,7 +1358,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
if (gfp & GFP_ATOMIC)
if (!(gfp & __GFP_WAIT))
return __iommu_alloc_atomic(dev, size, handle);
/*


@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
struct mem_type {
pteval_t prot_pte;
pteval_t prot_pte_s2;
pmdval_t prot_l1;
pmdval_t prot_sect;
unsigned int domain;


@ -232,12 +232,16 @@ __setup("noalign", noalign_setup);
#endif /* ifdef CONFIG_CPU_CP15 / else */
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
.prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
s2_policy(L_PTE_S2_MT_DEV_SHARED) |
L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
@ -508,7 +512,8 @@ static void __init build_mem_type_table(void)
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
s2_pgprot = cp->pte_s2;
hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
/*
* ARMv6 and above have extended page tables.


@ -208,7 +208,6 @@ __v6_setup:
mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
@ -218,6 +217,8 @@ __v6_setup:
ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
mcr p15, 0, r8, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
@ complete invalidations
adr r5, v6_crval
ldmia r5, {r5, r6}
ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables


@ -351,7 +351,6 @@ __v7_setup:
4: mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
dsb
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
@ -360,6 +359,7 @@ __v7_setup:
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
dsb @ Complete invalidations
#ifndef CONFIG_ARM_THUMBEE
mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
and r0, r0, #(0xf << 12) @ ThumbEE enabled field


@ -16,6 +16,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
#ifdef CONFIG_SMP
static inline void set_my_cpu_offset(unsigned long off)
{
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void)
}
#define __my_cpu_offset __my_cpu_offset()
#else /* !CONFIG_SMP */
#define set_my_cpu_offset(x) do { } while (0)
#endif /* CONFIG_SMP */
#include <asm-generic/percpu.h>
#endif /* __ASM_PERCPU_H */


@ -136,11 +136,11 @@ extern struct page *empty_zero_page;
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
#define pte_young(pte) (pte_val(pte) & PTE_AF)
#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
#define pte_write(pte) (pte_val(pte) & PTE_WRITE)
#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_valid_user(pte) \


@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame)
frame->sp = fp + 0x10;
frame->fp = *(unsigned long *)(fp);
frame->pc = *(unsigned long *)(fp + 8);
/*
* -4 here because we care about the PC at time of bl,
* not where the return will go.
*/
frame->pc = *(unsigned long *)(fp + 8) - 4;
return 0;
}


@ -694,6 +694,24 @@ __hyp_panic_str:
.align 2
/*
* u64 kvm_call_hyp(void *hypfn, ...);
*
* This is not really a variadic function in the classic C-way and care must
* be taken when calling this to ensure parameters are passed in registers
* only, since the stack will change between the caller and the callee.
*
* Call the function with the first argument containing a pointer to the
* function you wish to call in Hyp mode, and subsequent arguments will be
* passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
* function pointer can be passed). The function being called must be mapped
* in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
* passed in r0 and r1.
*
* A function pointer with a value of 0 has a special meaning, and is
* used to implement __hyp_get_vectors in the same way as in
* arch/arm64/kernel/hyp_stub.S.
*/
ENTRY(kvm_call_hyp)
hvc #0
ret
@ -737,7 +755,12 @@ el1_sync: // Guest trapped into EL2
pop x2, x3
pop x0, x1
push lr, xzr
/* Check for __hyp_get_vectors */
cbnz x0, 1f
mrs x0, vbar_el2
b 2f
1: push lr, xzr
/*
* Compute the function address in EL2, and shuffle the parameters.
@ -750,7 +773,7 @@ el1_sync: // Guest trapped into EL2
blr lr
pop lr, xzr
eret
2: eret
el1_trap:
/*


@ -11,7 +11,7 @@ all: uImage vmlinux.elf
KBUILD_DEFCONFIG := atstk1002_defconfig
KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic
KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__
KBUILD_AFLAGS += -mrelax -mno-pic
KBUILD_CFLAGS_MODULE += -mno-relax
LDFLAGS_vmlinux += --relax


@ -11,6 +11,7 @@
#define FRAM_VERSION "1.0"
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/io.h>


@ -17,5 +17,6 @@ generic-y += scatterlist.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
generic-y += vga.h
generic-y += xor.h
generic-y += hash.h


@ -295,6 +295,8 @@ extern void __iounmap(void __iomem *addr);
#define iounmap(addr) \
__iounmap(addr)
#define ioremap_wc ioremap_nocache
#define cached(addr) P1SEGADDR(addr)
#define uncached(addr) P2SEGADDR(addr)


@ -1,4 +1,4 @@
generic-y += barrier.h
generic-y += bitsperlong.h
generic-y += clkdev.h
generic-y += cputime.h
@ -6,6 +6,7 @@ generic-y += device.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ipcbuf.h
@ -18,6 +19,7 @@ generic-y += local.h
generic-y += mman.h
generic-y += mutex.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
@ -31,5 +33,3 @@ generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
generic-y += xor.h
generic-y += preempt.h
generic-y += hash.h


@ -1,8 +0,0 @@
#ifndef _M68K_BARRIER_H
#define _M68K_BARRIER_H
#define nop() do { asm volatile ("nop"); barrier(); } while (0)
#include <asm-generic/barrier.h>
#endif /* _M68K_BARRIER_H */


@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
#define NR_syscalls 349
#define NR_syscalls 351
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT


@ -354,5 +354,7 @@
#define __NR_process_vm_writev 346
#define __NR_kcmp 347
#define __NR_finit_module 348
#define __NR_sched_setattr 349
#define __NR_sched_getattr 350
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */


@ -369,4 +369,6 @@ ENTRY(sys_call_table)
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module
.long sys_sched_setattr
.long sys_sched_getattr /* 350 */


@ -200,10 +200,11 @@ static inline void __user *arch_compat_alloc_user_space(long len)
/*
* We can't access below the stack pointer in the 32bit ABI and
* can access 288 bytes in the 64bit ABI
* can access 288 bytes in the 64bit big-endian ABI,
* or 512 bytes with the new ELFv2 little-endian ABI.
*/
if (!is_32bit_task())
usp -= 288;
usp -= USER_REDZONE_SIZE;
return (void __user *) (usp - len);
}


@ -172,10 +172,20 @@ struct eeh_ops {
};
extern struct eeh_ops *eeh_ops;
extern int eeh_subsystem_enabled;
extern bool eeh_subsystem_enabled;
extern raw_spinlock_t confirm_error_lock;
extern int eeh_probe_mode;
static inline bool eeh_enabled(void)
{
return eeh_subsystem_enabled;
}
static inline void eeh_set_enable(bool mode)
{
eeh_subsystem_enabled = mode;
}
#define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */
#define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */
@ -246,7 +256,7 @@ void eeh_remove_device(struct pci_dev *);
* If this macro yields TRUE, the caller relays to eeh_check_failure()
* which does further tests out of line.
*/
#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled)
#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled())
/*
* Reads from a device which has been isolated by EEH will return
@ -257,6 +267,13 @@ void eeh_remove_device(struct pci_dev *);
#else /* !CONFIG_EEH */
static inline bool eeh_enabled(void)
{
return false;
}
static inline void eeh_set_enable(bool mode) { }
static inline int eeh_init(void)
{
return 0;


@ -127,7 +127,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
return __pte(pte_update(ptep, ~0UL, 0));
#endif


@ -816,8 +816,8 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
int64_t opal_pci_poll(uint64_t phb_id);
int64_t opal_return_cpu(void);
int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val);
int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val);
int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
uint32_t addr, uint32_t data, uint32_t sz);


@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, unsigned long clr,
unsigned long set,
int huge)
{
#ifdef PTE_ATOMIC_UPDATES
@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm,
andi. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
#else
unsigned long old = pte_val(*ptep);
*ptep = __pte(old & ~clr);
*ptep = __pte((old & ~clr) | set);
#endif
/* huge pages use the old page table lock */
if (!huge)
@ -231,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
pte_update(mm, addr, ptep, _PAGE_RW, 0);
pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
pte_update(mm, addr, ptep, _PAGE_RW, 1);
pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}
/*
@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
return __pte(old);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t * ptep)
{
pte_update(mm, addr, ptep, ~0UL, 0);
pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
@ -506,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma,
extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
unsigned long addr,
pmd_t *pmdp, unsigned long clr);
pmd_t *pmdp,
unsigned long clr,
unsigned long set);
static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
@ -515,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED);
old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
return ((old & _PAGE_ACCESSED) != 0);
}
@ -542,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
return;
pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW);
pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
}
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH


@ -75,12 +75,34 @@ static inline pte_t pte_mknuma(pte_t pte)
return pte;
}
#define ptep_set_numa ptep_set_numa
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
VM_BUG_ON(1);
pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
return;
}
#define pmd_numa pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
return pte_numa(pmd_pte(pmd));
}
#define pmdp_set_numa pmdp_set_numa
static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
VM_BUG_ON(1);
pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
return;
}
#define pmd_mknonnuma pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{


@ -28,11 +28,23 @@
#ifdef __powerpc64__
/*
* Size of redzone that userspace is allowed to use below the stack
* pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in
* the new ELFv2 little-endian ABI, so we allow the larger amount.
*
* For kernel code we allow a 288-byte redzone, in order to conserve
* kernel stack space; gcc currently only uses 288 bytes, and will
* hopefully allow explicit control of the redzone size in future.
*/
#define USER_REDZONE_SIZE 512
#define KERNEL_REDZONE_SIZE 288
#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
STACK_FRAME_OVERHEAD + 288)
STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12
/* Size of dummy stack frame allocated when calling signal handler. */
@ -41,6 +53,8 @@
#else /* __powerpc64__ */
#define USER_REDZONE_SIZE 0
#define KERNEL_REDZONE_SIZE 0
#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)


@ -4,11 +4,11 @@
#ifdef __KERNEL__
/* Default link addresses for the vDSOs */
#define VDSO32_LBASE 0x100000
#define VDSO64_LBASE 0x100000
#define VDSO32_LBASE 0x0
#define VDSO64_LBASE 0x0
/* Default map addresses for 32bit vDSO */
#define VDSO32_MBASE VDSO32_LBASE
#define VDSO32_MBASE 0x100000
#define VDSO_VERSION_STRING LINUX_2.6.15


@ -98,17 +98,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
phys_addr_t paddr;
if (!csize)
return 0;
csize = min_t(size_t, csize, PAGE_SIZE);
paddr = pfn << PAGE_SHIFT;
if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
vaddr = __va(pfn << PAGE_SHIFT);
if (memblock_is_region_memory(paddr, csize)) {
vaddr = __va(paddr);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
} else {
vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
vaddr = __ioremap(paddr, PAGE_SIZE, 0);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
iounmap(vaddr);
}


@ -28,6 +28,7 @@
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/export.h>
@ -89,7 +90,7 @@
/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;
int eeh_subsystem_enabled;
bool eeh_subsystem_enabled = false;
EXPORT_SYMBOL(eeh_subsystem_enabled);
/*
@ -364,7 +365,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
eeh_stats.total_mmio_ffs++;
if (!eeh_subsystem_enabled)
if (!eeh_enabled())
return 0;
if (!edev) {
@ -747,6 +748,17 @@ int __exit eeh_ops_unregister(const char *name)
return -EEXIST;
}
static int eeh_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
eeh_set_enable(false);
return NOTIFY_DONE;
}
static struct notifier_block eeh_reboot_nb = {
.notifier_call = eeh_reboot_notifier,
};
/**
* eeh_init - EEH initialization
*
@ -778,6 +790,14 @@ int eeh_init(void)
if (machine_is(powernv) && cnt++ <= 0)
return ret;
/* Register reboot notifier */
ret = register_reboot_notifier(&eeh_reboot_nb);
if (ret) {
pr_warn("%s: Failed to register notifier (%d)\n",
__func__, ret);
return ret;
}
/* call platform initialization function */
if (!eeh_ops) {
pr_warning("%s: Platform EEH operation not found\n",
@ -822,7 +842,7 @@ int eeh_init(void)
return ret;
}
if (eeh_subsystem_enabled)
if (eeh_enabled())
pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
else
pr_warning("EEH: No capable adapters found\n");
@ -897,7 +917,7 @@ void eeh_add_device_late(struct pci_dev *dev)
struct device_node *dn;
struct eeh_dev *edev;
if (!dev || !eeh_subsystem_enabled)
if (!dev || !eeh_enabled())
return;
pr_debug("EEH: Adding device %s\n", pci_name(dev));
@ -1005,7 +1025,7 @@ void eeh_remove_device(struct pci_dev *dev)
{
struct eeh_dev *edev;
if (!dev || !eeh_subsystem_enabled)
if (!dev || !eeh_enabled())
return;
edev = pci_dev_to_eeh_dev(dev);
@ -1045,7 +1065,7 @@ void eeh_remove_device(struct pci_dev *dev)
static int proc_eeh_show(struct seq_file *m, void *v)
{
if (0 == eeh_subsystem_enabled) {
if (!eeh_enabled()) {
seq_printf(m, "EEH Subsystem is globally disabled\n");
seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
} else {


@ -74,6 +74,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
*/
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
addr = ppc_function_entry((void *)addr);
/* use the create_branch to verify that this offset can be branched */
return create_branch((unsigned int *)ip, addr, 0);


@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
/*
* void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
*/
_GLOBAL(call_do_irq)
mflr r0
stw r0,4(r1)
lwz r10,THREAD+KSP_LIMIT(r2)
addi r11,r3,THREAD_INFO_GAP
addi r11,r4,THREAD_INFO_GAP
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
mr r1,r4
stw r10,8(r1)


@ -65,8 +65,8 @@ struct rt_sigframe {
struct siginfo __user *pinfo;
void __user *puc;
struct siginfo info;
/* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
char abigap[288];
/* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));
static const char fmt32[] = KERN_INFO \


@ -6,7 +6,7 @@
.globl vdso32_start, vdso32_end
.balign PAGE_SIZE
vdso32_start:
.incbin "arch/powerpc/kernel/vdso32/vdso32.so"
.incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
.balign PAGE_SIZE
vdso32_end:


@ -6,7 +6,7 @@
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
vdso64_start:
.incbin "arch/powerpc/kernel/vdso64/vdso64.so"
.incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
.balign PAGE_SIZE
vdso64_end:


@ -510,7 +510,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
}
unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, unsigned long clr)
pmd_t *pmdp, unsigned long clr,
unsigned long set)
{
unsigned long old, tmp;
@ -526,14 +527,15 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
andi. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
: "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY)
: "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
#else
old = pmd_val(*pmdp);
*pmdp = __pmd(old & ~clr);
*pmdp = __pmd((old & ~clr) | set);
#endif
if (old & _PAGE_HASHPTE)
hpte_do_hugepage_flush(mm, addr, pmdp);
@ -708,7 +710,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT);
pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
}
/*
@ -835,7 +837,7 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
unsigned long old;
pgtable_t *pgtable_slot;
old = pmd_hugepage_update(mm, addr, pmdp, ~0UL);
old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
old_pmd = __pmd(old);
/*
* We have pmd == none and we are holding page_table_lock.


@ -78,7 +78,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
arch_enter_lazy_mmu_mode();
for (; npages > 0; --npages) {
pte_update(mm, addr, pte, 0, 0);
pte_update(mm, addr, pte, 0, 0, 0);
addr += PAGE_SIZE;
++pte;
}


@ -44,7 +44,8 @@ static int ioda_eeh_event(struct notifier_block *nb,
/* We simply send special EEH event */
if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
(events & OPAL_EVENT_PCI_ERROR))
(events & OPAL_EVENT_PCI_ERROR) &&
eeh_enabled())
eeh_send_failure_event(NULL);
return 0;
@ -113,6 +114,7 @@ DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
#endif /* CONFIG_DEBUG_FS */
/**
* ioda_eeh_post_init - Chip dependent post initialization
* @hose: PCI controller
@ -220,6 +222,22 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
return ret;
}
static void ioda_eeh_phb_diag(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
long rc;
rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
PNV_PCI_DIAG_BUF_SIZE);
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
__func__, hose->global_number, rc);
return;
}
pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
}
/**
* ioda_eeh_get_state - Retrieve the state of PE
* @pe: EEH PE
@ -271,6 +289,9 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
result |= EEH_STATE_DMA_ACTIVE;
result |= EEH_STATE_MMIO_ENABLED;
result |= EEH_STATE_DMA_ENABLED;
} else if (!(pe->state & EEH_PE_ISOLATED)) {
eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
ioda_eeh_phb_diag(hose);
}
return result;
@ -314,6 +335,15 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
__func__, fstate, hose->global_number, pe_no);
}
/* Dump PHB diag-data for frozen PE */
if (result != EEH_STATE_NOT_SUPPORT &&
(result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
(EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
!(pe->state & EEH_PE_ISOLATED)) {
eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
ioda_eeh_phb_diag(hose);
}
return result;
}
@ -489,8 +519,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose,
static int ioda_eeh_reset(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
struct eeh_dev *edev;
struct pci_dev *dev;
struct pci_bus *bus;
int ret;
/*
@ -519,72 +548,16 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
if (pe->type & EEH_PE_PHB) {
ret = ioda_eeh_phb_reset(hose, option);
} else {
if (pe->type & EEH_PE_DEVICE) {
/*
* If it's device PE, we didn't refer to the parent
* PCI bus yet. So we have to figure it out indirectly.
*/
edev = list_first_entry(&pe->edevs,
struct eeh_dev, list);
dev = eeh_dev_to_pci_dev(edev);
dev = dev->bus->self;
} else {
/*
* If it's bus PE, the parent PCI bus is already there
* and just pick it up.
*/
dev = pe->bus->self;
}
/*
* Do reset based on the fact that the direct upstream bridge
* is root bridge (port) or not.
*/
if (dev->bus->number == 0)
bus = eeh_pe_bus_get(pe);
if (pci_is_root_bus(bus))
ret = ioda_eeh_root_reset(hose, option);
else
ret = ioda_eeh_bridge_reset(hose, dev, option);
ret = ioda_eeh_bridge_reset(hose, bus->self, option);
}
return ret;
}
/**
* ioda_eeh_get_log - Retrieve error log
* @pe: EEH PE
* @severity: Severity level of the log
* @drv_log: buffer to store the log
* @len: space of the log buffer
*
* The function is used to retrieve error log from P7IOC.
*/
static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
char *drv_log, unsigned long len)
{
s64 ret;
unsigned long flags;
struct pci_controller *hose = pe->phb;
struct pnv_phb *phb = hose->private_data;
spin_lock_irqsave(&phb->lock, flags);
ret = opal_pci_get_phb_diag_data2(phb->opal_id,
phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
if (ret) {
spin_unlock_irqrestore(&phb->lock, flags);
pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n",
__func__, hose->global_number, pe->addr, ret);
return -EIO;
}
/* The PHB diag-data is always indicative */
pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
spin_unlock_irqrestore(&phb->lock, flags);
return 0;
}
/**
* ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
* @pe: EEH PE
@ -666,22 +639,6 @@ static void ioda_eeh_hub_diag(struct pci_controller *hose)
}
}
static void ioda_eeh_phb_diag(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
long rc;
rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
PNV_PCI_DIAG_BUF_SIZE);
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
__func__, hose->global_number, rc);
return;
}
pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
}
static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
struct eeh_pe **pe)
{
@ -854,6 +811,20 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
__func__, err_type);
}
/*
* The EEH core will try to recover from a fenced PHB or a
* frozen PE. For a frozen PE, the EEH core enables the IO
* path before collecting logs, which destroys the error
* state at the site. So we have to dump the log in
* advance here.
*/
if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
ret == EEH_NEXT_ERR_FENCED_PHB) &&
!((*pe)->state & EEH_PE_ISOLATED)) {
eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
ioda_eeh_phb_diag(hose);
}
/*
* If we have no errors on the specific PHB or only
* informative error there, we continue poking it.
@ -872,7 +843,6 @@ struct pnv_eeh_ops ioda_eeh_ops = {
.set_option = ioda_eeh_set_option,
.get_state = ioda_eeh_get_state,
.reset = ioda_eeh_reset,
.get_log = ioda_eeh_get_log,
.configure_bridge = ioda_eeh_configure_bridge,
.next_error = ioda_eeh_next_error
};
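A note on the hunks above: they all apply the same once-per-error pattern, namely mark the PE isolated before the EEH core pokes it again, then dump the PHB diag-data while the error state is still intact. A condensed sketch of that pattern, using only names visible in this diff (the helper ioda_eeh_dump_diag_once() itself is hypothetical):

static void ioda_eeh_dump_diag_once(struct eeh_pe *pe,
				    struct pci_controller *hose)
{
	/* EEH_PE_ISOLATED doubles as a "diag-data already dumped" marker. */
	if (pe->state & EEH_PE_ISOLATED)
		return;

	eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
	ioda_eeh_phb_diag(hose);
}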


@ -145,7 +145,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* Enable EEH explicitly so that we will do EEH check
* while accessing I/O stuff
*/
eeh_subsystem_enabled = 1;
eeh_set_enable(true);
/* Save memory bars */
eeh_save_bars(edev);


@ -71,11 +71,11 @@ static int opal_xscom_err_xlate(int64_t rc)
}
}
static u64 opal_scom_unmangle(u64 reg)
static u64 opal_scom_unmangle(u64 addr)
{
/*
* XSCOM indirect addresses have the top bit set. Additionally
* the reset of the top 3 nibbles is always 0.
* the rest of the top 3 nibbles is always 0.
*
* Because the debugfs interface uses signed offsets and shifts
* the address left by 3, we basically cannot use the top 4 bits
@ -86,10 +86,13 @@ static u64 opal_scom_unmangle(u64 reg)
* conversion here. To leave room for further xscom address
* expansion, we only clear out the top byte
*
* For in-kernel use, we also support the real indirect bit, so
* we test for any of the top 5 bits
*
*/
if (reg & (1ull << 59))
reg = (reg & ~(0xffull << 56)) | (1ull << 63);
return reg;
if (addr & (0x1full << 59))
addr = (addr & ~(0xffull << 56)) | (1ull << 63);
return addr;
}
static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
@ -98,8 +101,8 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
int64_t rc;
__be64 v;
reg = opal_scom_unmangle(reg);
rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
reg = opal_scom_unmangle(m->addr + reg);
rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v));
*value = be64_to_cpu(v);
return opal_xscom_err_xlate(rc);
}
@ -109,8 +112,8 @@ static int opal_scom_write(scom_map_t map, u64 reg, u64 value)
struct opal_scom_map *m = map;
int64_t rc;
reg = opal_scom_unmangle(reg);
rc = opal_xscom_write(m->chip, m->addr + reg, value);
reg = opal_scom_unmangle(m->addr + reg);
rc = opal_xscom_write(m->chip, reg, value);
return opal_xscom_err_xlate(rc);
}
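To make the bit manipulation in opal_scom_unmangle() concrete, here is a stand-alone user-space sketch (not part of the patch; the sample address is made up) that applies the same transformation described in the comment above:

#include <stdio.h>
#include <stdint.h>

static uint64_t scom_unmangle(uint64_t addr)
{
	/* Any of the top five bits (59-63) marks an indirect address. */
	if (addr & (0x1fULL << 59))
		/* Clear the top byte, then set the real indirect bit 63. */
		addr = (addr & ~(0xffULL << 56)) | (1ULL << 63);
	return addr;
}

int main(void)
{
	uint64_t mangled = 0x0800000012345678ULL;	/* hypothetical, bit 59 set */

	printf("mangled:   %016llx\n", (unsigned long long)mangled);
	printf("unmangled: %016llx\n",
	       (unsigned long long)scom_unmangle(mangled));
	return 0;
}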


@ -134,57 +134,72 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n",
hose->global_number, common->version);
pr_info(" brdgCtl: %08x\n", data->brdgCtl);
pr_info(" portStatusReg: %08x\n", data->portStatusReg);
pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
pr_info(" deviceStatus: %08x\n", data->deviceStatus);
pr_info(" slotStatus: %08x\n", data->slotStatus);
pr_info(" linkStatus: %08x\n", data->linkStatus);
pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
pr_info(" devSecStatus: %08x\n", data->devSecStatus);
pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
pr_info(" sourceId: %08x\n", data->sourceId);
pr_info(" errorClass: %016llx\n", data->errorClass);
pr_info(" correlator: %016llx\n", data->correlator);
pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr);
pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr);
pr_info(" lemFir: %016llx\n", data->lemFir);
pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
pr_info(" lemWOF: %016llx\n", data->lemWOF);
pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
if (data->brdgCtl)
pr_info(" brdgCtl: %08x\n",
data->brdgCtl);
if (data->portStatusReg || data->rootCmplxStatus ||
data->busAgentStatus)
pr_info(" UtlSts: %08x %08x %08x\n",
data->portStatusReg, data->rootCmplxStatus,
data->busAgentStatus);
if (data->deviceStatus || data->slotStatus ||
data->linkStatus || data->devCmdStatus ||
data->devSecStatus)
pr_info(" RootSts: %08x %08x %08x %08x %08x\n",
data->deviceStatus, data->slotStatus,
data->linkStatus, data->devCmdStatus,
data->devSecStatus);
if (data->rootErrorStatus || data->uncorrErrorStatus ||
data->corrErrorStatus)
pr_info(" RootErrSts: %08x %08x %08x\n",
data->rootErrorStatus, data->uncorrErrorStatus,
data->corrErrorStatus);
if (data->tlpHdr1 || data->tlpHdr2 ||
data->tlpHdr3 || data->tlpHdr4)
pr_info(" RootErrLog: %08x %08x %08x %08x\n",
data->tlpHdr1, data->tlpHdr2,
data->tlpHdr3, data->tlpHdr4);
if (data->sourceId || data->errorClass ||
data->correlator)
pr_info(" RootErrLog1: %08x %016llx %016llx\n",
data->sourceId, data->errorClass,
data->correlator);
if (data->p7iocPlssr || data->p7iocCsr)
pr_info(" PhbSts: %016llx %016llx\n",
data->p7iocPlssr, data->p7iocCsr);
if (data->lemFir || data->lemErrorMask ||
data->lemWOF)
pr_info(" Lem: %016llx %016llx %016llx\n",
data->lemFir, data->lemErrorMask,
data->lemWOF);
if (data->phbErrorStatus || data->phbFirstErrorStatus ||
data->phbErrorLog0 || data->phbErrorLog1)
pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n",
data->phbErrorStatus, data->phbFirstErrorStatus,
data->phbErrorLog0, data->phbErrorLog1);
if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
data->mmioErrorLog0 || data->mmioErrorLog1)
pr_info(" OutErr: %016llx %016llx %016llx %016llx\n",
data->mmioErrorStatus, data->mmioFirstErrorStatus,
data->mmioErrorLog0, data->mmioErrorLog1);
if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
data->dma0ErrorLog0 || data->dma0ErrorLog1)
pr_info(" InAErr: %016llx %016llx %016llx %016llx\n",
data->dma0ErrorStatus, data->dma0FirstErrorStatus,
data->dma0ErrorLog0, data->dma0ErrorLog1);
if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
data->dma1ErrorLog0 || data->dma1ErrorLog1)
pr_info(" InBErr: %016llx %016llx %016llx %016llx\n",
data->dma1ErrorStatus, data->dma1FirstErrorStatus,
data->dma1ErrorLog0, data->dma1ErrorLog1);
for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
if ((data->pestA[i] >> 63) == 0 &&
(data->pestB[i] >> 63) == 0)
continue;
pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
pr_info(" PESTB: %016llx\n", data->pestB[i]);
pr_info(" PE[%3d] A/B: %016llx %016llx\n",
i, data->pestA[i], data->pestB[i]);
}
}
@ -197,62 +212,77 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
data = (struct OpalIoPhb3ErrorData*)common;
pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n",
hose->global_number, common->version);
pr_info(" brdgCtl: %08x\n", data->brdgCtl);
pr_info(" portStatusReg: %08x\n", data->portStatusReg);
pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
pr_info(" deviceStatus: %08x\n", data->deviceStatus);
pr_info(" slotStatus: %08x\n", data->slotStatus);
pr_info(" linkStatus: %08x\n", data->linkStatus);
pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
pr_info(" devSecStatus: %08x\n", data->devSecStatus);
pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
pr_info(" sourceId: %08x\n", data->sourceId);
pr_info(" errorClass: %016llx\n", data->errorClass);
pr_info(" correlator: %016llx\n", data->correlator);
pr_info(" nFir: %016llx\n", data->nFir);
pr_info(" nFirMask: %016llx\n", data->nFirMask);
pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
pr_info(" PhbCsr: %016llx\n", data->phbCsr);
pr_info(" lemFir: %016llx\n", data->lemFir);
pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
pr_info(" lemWOF: %016llx\n", data->lemWOF);
pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
if (data->brdgCtl)
pr_info(" brdgCtl: %08x\n",
data->brdgCtl);
if (data->portStatusReg || data->rootCmplxStatus ||
data->busAgentStatus)
pr_info(" UtlSts: %08x %08x %08x\n",
data->portStatusReg, data->rootCmplxStatus,
data->busAgentStatus);
if (data->deviceStatus || data->slotStatus ||
data->linkStatus || data->devCmdStatus ||
data->devSecStatus)
pr_info(" RootSts: %08x %08x %08x %08x %08x\n",
data->deviceStatus, data->slotStatus,
data->linkStatus, data->devCmdStatus,
data->devSecStatus);
if (data->rootErrorStatus || data->uncorrErrorStatus ||
data->corrErrorStatus)
pr_info(" RootErrSts: %08x %08x %08x\n",
data->rootErrorStatus, data->uncorrErrorStatus,
data->corrErrorStatus);
if (data->tlpHdr1 || data->tlpHdr2 ||
data->tlpHdr3 || data->tlpHdr4)
pr_info(" RootErrLog: %08x %08x %08x %08x\n",
data->tlpHdr1, data->tlpHdr2,
data->tlpHdr3, data->tlpHdr4);
if (data->sourceId || data->errorClass ||
data->correlator)
pr_info(" RootErrLog1: %08x %016llx %016llx\n",
data->sourceId, data->errorClass,
data->correlator);
if (data->nFir || data->nFirMask ||
data->nFirWOF)
pr_info(" nFir: %016llx %016llx %016llx\n",
data->nFir, data->nFirMask,
data->nFirWOF);
if (data->phbPlssr || data->phbCsr)
pr_info(" PhbSts: %016llx %016llx\n",
data->phbPlssr, data->phbCsr);
if (data->lemFir || data->lemErrorMask ||
data->lemWOF)
pr_info(" Lem: %016llx %016llx %016llx\n",
data->lemFir, data->lemErrorMask,
data->lemWOF);
if (data->phbErrorStatus || data->phbFirstErrorStatus ||
data->phbErrorLog0 || data->phbErrorLog1)
pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n",
data->phbErrorStatus, data->phbFirstErrorStatus,
data->phbErrorLog0, data->phbErrorLog1);
if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
data->mmioErrorLog0 || data->mmioErrorLog1)
pr_info(" OutErr: %016llx %016llx %016llx %016llx\n",
data->mmioErrorStatus, data->mmioFirstErrorStatus,
data->mmioErrorLog0, data->mmioErrorLog1);
if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
data->dma0ErrorLog0 || data->dma0ErrorLog1)
pr_info(" InAErr: %016llx %016llx %016llx %016llx\n",
data->dma0ErrorStatus, data->dma0FirstErrorStatus,
data->dma0ErrorLog0, data->dma0ErrorLog1);
if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
data->dma1ErrorLog0 || data->dma1ErrorLog1)
pr_info(" InBErr: %016llx %016llx %016llx %016llx\n",
data->dma1ErrorStatus, data->dma1FirstErrorStatus,
data->dma1ErrorLog0, data->dma1ErrorLog1);
for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
if ((data->pestA[i] >> 63) == 0 &&
(data->pestB[i] >> 63) == 0)
continue;
pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
pr_info(" PESTB: %016llx\n", data->pestB[i]);
pr_info(" PE[%3d] A/B: %016llx %016llx\n",
i, data->pestA[i], data->pestB[i]);
}
}


@ -265,7 +265,7 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
enable = 1;
if (enable) {
eeh_subsystem_enabled = 1;
eeh_set_enable(true);
eeh_add_to_parent_pe(edev);
pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",


@ -35,12 +35,7 @@
#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
static struct rtas_args rtas_stop_self_args = {
.token = RTAS_UNKNOWN_SERVICE,
.nargs = 0,
.nret = 1,
.rets = &rtas_stop_self_args.args[0],
};
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
CPU_STATE_OFFLINE;
@ -93,15 +88,20 @@ void set_default_offline_state(int cpu)
static void rtas_stop_self(void)
{
struct rtas_args *args = &rtas_stop_self_args;
struct rtas_args args = {
.token = cpu_to_be32(rtas_stop_self_token),
.nargs = 0,
.nret = 1,
.rets = &args.args[0],
};
local_irq_disable();
BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
enter_rtas(__pa(args));
enter_rtas(__pa(&args));
panic("Alas, I survived.\n");
}
@ -392,10 +392,10 @@ static int __init pseries_cpu_hotplug_init(void)
}
}
rtas_stop_self_args.token = rtas_token("stop-self");
rtas_stop_self_token = rtas_token("stop-self");
qcss_tok = rtas_token("query-cpu-stopped-state");
if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "CPU Hotplug not supported by firmware "
"- disabling.\n");


@ -113,7 +113,8 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct device_node *dn, *pdn;
struct pci_bus *bus;
const __be32 *pcie_link_speed_stats;
u32 pcie_link_speed_stats[2];
int rc;
bus = bridge->bus;
@ -122,38 +123,45 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
pcie_link_speed_stats = of_get_property(pdn,
"ibm,pcie-link-speed-stats", NULL);
if (pcie_link_speed_stats)
rc = of_property_read_u32_array(pdn,
"ibm,pcie-link-speed-stats",
&pcie_link_speed_stats[0], 2);
if (!rc)
break;
}
of_node_put(pdn);
if (!pcie_link_speed_stats) {
if (rc) {
pr_err("no ibm,pcie-link-speed-stats property\n");
return 0;
}
switch (be32_to_cpup(pcie_link_speed_stats)) {
switch (pcie_link_speed_stats[0]) {
case 0x01:
bus->max_bus_speed = PCIE_SPEED_2_5GT;
break;
case 0x02:
bus->max_bus_speed = PCIE_SPEED_5_0GT;
break;
case 0x04:
bus->max_bus_speed = PCIE_SPEED_8_0GT;
break;
default:
bus->max_bus_speed = PCI_SPEED_UNKNOWN;
break;
}
switch (be32_to_cpup(pcie_link_speed_stats)) {
switch (pcie_link_speed_stats[1]) {
case 0x01:
bus->cur_bus_speed = PCIE_SPEED_2_5GT;
break;
case 0x02:
bus->cur_bus_speed = PCIE_SPEED_5_0GT;
break;
case 0x04:
bus->cur_bus_speed = PCIE_SPEED_8_0GT;
break;
default:
bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
break;
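The hunk above replaces a raw of_get_property() lookup with of_property_read_u32_array(), which handles both the existence check and the be32-to-cpu conversion. A minimal sketch of that call, assuming the two-cell layout the switch statements imply (cell 0: maximum link speed, cell 1: current link speed):

#include <linux/of.h>

/* Hypothetical helper; only the property name is taken from the patch. */
static int read_pcie_link_speed_stats(struct device_node *np, u32 stats[2])
{
	/* Returns 0 on success, or -EINVAL/-ENODATA/-EOVERFLOW on failure. */
	return of_property_read_u32_array(np, "ibm,pcie-link-speed-stats",
					  stats, 2);
}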


@ -1421,5 +1421,5 @@ ENTRY(sys_sched_setattr_wrapper)
ENTRY(sys_sched_getattr_wrapper)
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # const char __user *
llgfr %r3,%r3 # unsigned int
llgfr %r4,%r4 # unsigned int
jg sys_sched_getattr


@ -206,11 +206,13 @@ static void dma_cleanup_tables(struct zpci_dev *zdev)
zdev->dma_table = NULL;
}
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
int size)
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
unsigned long start, int size)
{
unsigned long boundary_size = 0x1000000;
unsigned long boundary_size;
boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
PAGE_SIZE) >> PAGE_SHIFT;
return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
start, size, 0, boundary_size, 0);
}
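As a worked example of the boundary_size formula above: with the common default segment boundary mask of 0xffffffff and 4 KiB pages (both assumed here), an allocation must not cross a 4 GiB boundary, which is 0x100000 pages. A stand-alone sketch of the arithmetic:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096ULL
#define PAGE_SHIFT	12
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t seg_boundary_mask = 0xffffffffULL;	/* assumed default */
	uint64_t boundary_size;

	/* Mirrors: ALIGN(dma_get_seg_boundary(dev) + 1, PAGE_SIZE) >> PAGE_SHIFT */
	boundary_size = ALIGN(seg_boundary_mask + 1, PAGE_SIZE) >> PAGE_SHIFT;
	printf("boundary_size = %#llx pages\n",
	       (unsigned long long)boundary_size);	/* prints 0x100000 */
	return 0;
}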


@ -27,7 +27,7 @@ config SPARC
select RTC_DRV_M48T59
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP


@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
@ -62,6 +63,7 @@ extern unsigned long last_valid_pfn;
static pgd_t *srmmu_swapper_pg_dir;
const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
EXPORT_SYMBOL(sparc32_cachetlb_ops);
#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;


@ -111,7 +111,7 @@ struct mem_vector {
};
#define MEM_AVOID_MAX 5
struct mem_vector mem_avoid[MEM_AVOID_MAX];
static struct mem_vector mem_avoid[MEM_AVOID_MAX];
static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
{
@ -180,7 +180,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
}
/* Does this memory vector overlap a known avoided area? */
bool mem_avoid_overlap(struct mem_vector *img)
static bool mem_avoid_overlap(struct mem_vector *img)
{
int i;
@ -192,8 +192,9 @@ bool mem_avoid_overlap(struct mem_vector *img)
return false;
}
unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN];
unsigned long slot_max = 0;
static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
CONFIG_PHYSICAL_ALIGN];
static unsigned long slot_max;
static void slots_append(unsigned long addr)
{


@ -135,6 +135,7 @@ extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_mkexec(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
struct efi_setup_data {
u64 fw_vendor;


@ -66,6 +66,6 @@ extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);
/* MSR based TSC calibration for Intel Atom SoC platforms */
int try_msr_calibrate_tsc(unsigned long *fast_calibrate);
unsigned long try_msr_calibrate_tsc(void);
#endif /* _ASM_X86_TSC_H */


@ -1192,6 +1192,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event_list[i]) {
if (i >= cpuc->n_events - cpuc->n_added)
--cpuc->n_added;
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, event);
@ -1521,6 +1524,8 @@ static int __init init_hw_perf_events(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
quirk->func();
@ -1534,7 +1539,6 @@ static int __init init_hw_perf_events(void)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
0, x86_pmu.num_counters, 0, 0);
x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
x86_pmu_format_group.attrs = x86_pmu.format_attrs;
if (x86_pmu.event_attrs)
@ -1820,9 +1824,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
if (ret)
return ret;
if (x86_pmu.attr_rdpmc_broken)
return -ENOTSUPP;
if (!!val != !!x86_pmu.attr_rdpmc) {
x86_pmu.attr_rdpmc = !!val;
smp_call_function(change_rdpmc, (void *)val, 1);
on_each_cpu(change_rdpmc, (void *)val, 1);
}
return count;
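The switch from smp_call_function() to on_each_cpu() above matters because smp_call_function() runs the callback on every CPU except the calling one, while on_each_cpu() includes the local CPU as well. A simplified sketch (the callback name is hypothetical, standing in for change_rdpmc()):

static void toggle_rdpmc_on_cpu(void *info)
{
	/* Enable or disable user-space RDPMC on this CPU, based on 'info'. */
}

static void toggle_rdpmc_everywhere(void *val)
{
	/* Runs the callback on all online CPUs, including the caller. */
	on_each_cpu(toggle_rdpmc_on_cpu, val, 1);
}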


@ -409,6 +409,7 @@ struct x86_pmu {
/*
* sysfs attrs
*/
int attr_rdpmc_broken;
int attr_rdpmc;
struct attribute **format_attrs;
struct attribute **event_attrs;


@ -1361,10 +1361,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
if (!status) {
intel_pmu_enable_all(0);
return handled;
}
if (!status)
goto done;
loops = 0;
again:
@ -2310,10 +2308,7 @@ __init int intel_pmu_init(void)
if (version > 1)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
/*
* v2 and above have a perf capabilities MSR
*/
if (version > 1) {
if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities;
rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);


@ -501,8 +501,11 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
@ -1178,10 +1181,15 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),


@ -231,31 +231,49 @@ static __initconst const struct x86_pmu p6_pmu = {
};
static __init void p6_pmu_rdpmc_quirk(void)
{
if (boot_cpu_data.x86_mask < 9) {
/*
* PPro erratum 26; fixed in stepping 9 and above.
*/
pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
x86_pmu.attr_rdpmc_broken = 1;
x86_pmu.attr_rdpmc = 0;
}
}
__init int p6_pmu_init(void)
{
x86_pmu = p6_pmu;
switch (boot_cpu_data.x86_model) {
case 1:
case 3: /* Pentium Pro */
case 5:
case 6: /* Pentium II */
case 7:
case 8:
case 11: /* Pentium III */
case 9:
case 13:
/* Pentium M */
case 1: /* Pentium Pro */
x86_add_quirk(p6_pmu_rdpmc_quirk);
break;
case 3: /* Pentium II - Klamath */
case 5: /* Pentium II - Deschutes */
case 6: /* Pentium II - Mendocino */
break;
case 7: /* Pentium III - Katmai */
case 8: /* Pentium III - Coppermine */
case 10: /* Pentium III Xeon */
case 11: /* Pentium III - Tualatin */
break;
case 9: /* Pentium M - Banias */
case 13: /* Pentium M - Dothan */
break;
default:
pr_cont("unsupported p6 CPU model %d ",
boot_cpu_data.x86_model);
pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model);
return -ENODEV;
}
x86_pmu = p6_pmu;
memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
return 0;
}


@ -279,5 +279,7 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
(unsigned long)&_text - __START_KERNEL);
}


@ -100,8 +100,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
flag |= __GFP_ZERO;
again:
page = NULL;
if (!(flag & GFP_ATOMIC))
/* CMA can only be used in a context that permits sleeping */
if (flag & __GFP_WAIT)
page = dma_alloc_from_contiguous(dev, count, get_order(size));
/* fallback */
if (!page)
page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
if (!page)


@ -1239,14 +1239,8 @@ void __init setup_arch(char **cmdline_p)
register_refined_jiffies(CLOCK_TICK_RATE);
#ifdef CONFIG_EFI
/* Once setup is done above, unmap the EFI memory map on
* mismatched firmware/kernel architectures since there is no
* support for runtime services.
*/
if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
efi_unmap_memmap();
}
if (efi_enabled(EFI_BOOT))
efi_apply_memmap_quirks();
#endif
}
