Merge branch 'linus' into locking/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 169310f71f

 .mailmap | 1
@ -62,6 +62,7 @@ Frank Zago <fzago@systemfabricworks.com>
Greg Kroah-Hartman <greg@echidna.(none)>
Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
Henk Vergonet <Henk.Vergonet@gmail.com>
Henrik Kretzschmar <henne@nachtwindheim.de>
Henrik Rydberg <rydberg@bitmath.org>
@ -1,110 +1,139 @@
What: /sys/class/ata_...
Date: August 2008
Contact: Gwendal Grignou<gwendal@google.com>
Description:

Provide a place in sysfs for storing the ATA topology of the system. This allows
retrieving various information about ATA objects.
Provide a place in sysfs for storing the ATA topology of the
system. This allows retrieving various information about ATA
objects.

Files under /sys/class/ata_port
-------------------------------

For each port, a directory ataX is created where X is the ata_port_id of
the port. The device parent is the ata host device.
For each port, a directory ataX is created where X is the ata_port_id of the
port. The device parent is the ata host device.

idle_irq (read)

Number of IRQ received by the port while idle [some ata HBA only].
What: /sys/class/ata_port/ataX/nr_pmp_links
What: /sys/class/ata_port/ataX/idle_irq
Date: May, 2010
KernelVersion: v2.6.37
Contact: Gwendal Grignou <gwendal@chromium.org>
Description:
nr_pmp_links: (RO) If a SATA Port Multiplier (PM) is
connected, the number of links behind it.

nr_pmp_links (read)
idle_irq: (RO) Number of IRQ received by the port while
idle [some ata HBA only].

If a SATA Port Multiplier (PM) is connected, number of link behind it.

What: /sys/class/ata_port/ataX/port_no
Date: May, 2013
KernelVersion: v3.11
Contact: Gwendal Grignou <gwendal@chromium.org>
Description:
(RO) Host local port number. While registering host controller,
port numbers are tracked based upon number of ports available on
the controller. This attribute is needed by udev for composing
persistent links in /dev/disk/by-path.

Files under /sys/class/ata_link
-------------------------------

Behind each port, there is a ata_link. If there is a SATA PM in the
topology, 15 ata_link objects are created.
Behind each port, there is a ata_link. If there is a SATA PM in the topology, 15
ata_link objects are created.

If a link is behind a port, the directory name is linkX, where X is
ata_port_id of the port.
If a link is behind a PM, its name is linkX.Y where X is ata_port_id
of the parent port and Y the PM port.
If a link is behind a port, the directory name is linkX, where X is ata_port_id
of the port. If a link is behind a PM, its name is linkX.Y where X is
ata_port_id of the parent port and Y the PM port.

hw_sata_spd_limit

Maximum speed supported by the connected SATA device.
What: /sys/class/ata_link/linkX[.Y]/hw_sata_spd_limit
What: /sys/class/ata_link/linkX[.Y]/sata_spd_limit
What: /sys/class/ata_link/linkX[.Y]/sata_spd
Date: May, 2010
KernelVersion: v2.6.37
Contact: Gwendal Grignou <gwendal@chromium.org>
Description:
hw_sata_spd_limit: (RO) Maximum speed supported by the
connected SATA device.

sata_spd_limit
sata_spd_limit: (RO) Maximum speed imposed by libata.

Maximum speed imposed by libata.
sata_spd: (RO) Current speed of the link
eg. 1.5, 3 Gbps etc.

sata_spd

Current speed of the link [1.5, 3Gps,...].

Files under /sys/class/ata_device
---------------------------------

Behind each link, up to two ata device are created.
The name of the directory is devX[.Y].Z where:
- X is ata_port_id of the port where the device is connected,
- Y the port of the PM if any, and
- Z the device id: for PATA, there is usually 2 devices [0,1],
only 1 for SATA.
Behind each link, up to two ata devices are created.
The name of the directory is devX[.Y].Z where:
- X is ata_port_id of the port where the device is connected,
- Y the port of the PM if any, and
- Z the device id: for PATA, there is usually 2 devices [0,1], only 1 for SATA.

class
Device class. Can be "ata" for disk, "atapi" for packet device,
"pmp" for PM, or "none" if no device was found behind the link.

dma_mode
What: /sys/class/ata_device/devX[.Y].Z/spdn_cnt
What: /sys/class/ata_device/devX[.Y].Z/gscr
What: /sys/class/ata_device/devX[.Y].Z/ering
What: /sys/class/ata_device/devX[.Y].Z/id
What: /sys/class/ata_device/devX[.Y].Z/pio_mode
What: /sys/class/ata_device/devX[.Y].Z/xfer_mode
What: /sys/class/ata_device/devX[.Y].Z/dma_mode
What: /sys/class/ata_device/devX[.Y].Z/class
Date: May, 2010
KernelVersion: v2.6.37
Contact: Gwendal Grignou <gwendal@chromium.org>
Description:
spdn_cnt: (RO) Number of times libata decided to lower the
speed of link due to errors.

Transfer modes supported by the device when in DMA mode.
Mostly used by PATA device.
gscr: (RO) Cached result of the dump of PM GSCR
register. Valid registers are:

pio_mode
0: SATA_PMP_GSCR_PROD_ID,
1: SATA_PMP_GSCR_REV,
2: SATA_PMP_GSCR_PORT_INFO,
32: SATA_PMP_GSCR_ERROR,
33: SATA_PMP_GSCR_ERROR_EN,
64: SATA_PMP_GSCR_FEAT,
96: SATA_PMP_GSCR_FEAT_EN,
130: SATA_PMP_GSCR_SII_GPIO

Transfer modes supported by the device when in PIO mode.
Mostly used by PATA device.
Only valid if the device is a PM.

xfer_mode
ering: (RO) Formatted output of the error ring of the
device.

Current transfer mode.
id: (RO) Cached result of IDENTIFY command, as
described in ATA8 7.16 and 7.17. Only valid if
the device is not a PM.

id
pio_mode: (RO) Transfer modes supported by the device when
in PIO mode. Mostly used by PATA device.

Cached result of IDENTIFY command, as described in ATA8 7.16 and 7.17.
Only valid if the device is not a PM.
xfer_mode: (RO) Current transfer mode

gscr
dma_mode: (RO) Transfer modes supported by the device when
in DMA mode. Mostly used by PATA device.

Cached result of the dump of PM GSCR register.
Valid registers are:
0: SATA_PMP_GSCR_PROD_ID,
1: SATA_PMP_GSCR_REV,
2: SATA_PMP_GSCR_PORT_INFO,
32: SATA_PMP_GSCR_ERROR,
33: SATA_PMP_GSCR_ERROR_EN,
64: SATA_PMP_GSCR_FEAT,
96: SATA_PMP_GSCR_FEAT_EN,
130: SATA_PMP_GSCR_SII_GPIO
Only valid if the device is a PM.
class: (RO) Device class. Can be "ata" for disk,
"atapi" for packet device, "pmp" for PM, or
"none" if no device was found behind the link.

trim

Shows the DSM TRIM mode currently used by the device. Valid
values are:
unsupported: Drive does not support DSM TRIM
unqueued: Drive supports unqueued DSM TRIM only
queued: Drive supports queued DSM TRIM
forced_unqueued: Drive's queued DSM support is known to be
buggy and only unqueued TRIM commands
are sent
What: /sys/class/ata_device/devX[.Y].Z/trim
Date: May, 2015
KernelVersion: v4.10
Contact: Gwendal Grignou <gwendal@chromium.org>
Description:
(RO) Shows the DSM TRIM mode currently used by the device. Valid
values are:

spdn_cnt
unsupported: Drive does not support DSM TRIM

Number of time libata decided to lower the speed of link due to errors.
unqueued: Drive supports unqueued DSM TRIM only

ering
queued: Drive supports queued DSM TRIM

Formatted output of the error ring of the device.
forced_unqueued: Drive's queued DSM support is known to
be buggy and only unqueued TRIM commands
are sent
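For illustration only (not part of this change): a minimal userspace sketch
that consumes one of the attributes documented above. The "link1" directory
name is a hypothetical example; real systems enumerate linkX[.Y] entries
according to their ATA topology.

    /* Read the current SATA link speed exposed via sysfs. */
    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            FILE *f = fopen("/sys/class/ata_link/link1/sata_spd", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("link1 speed: %s", buf);
            fclose(f);
            return 0;
    }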
@ -0,0 +1,58 @@
What: /sys/block/*/device/sw_activity
Date: Jun, 2008
KernelVersion: v2.6.27
Contact: linux-ide@vger.kernel.org
Description:
(RW) Used by drivers which support software controlled activity
LEDs.

It has the following valid values:

0 OFF - the LED is not activated on activity
1 BLINK_ON - the LED blinks on every 10ms when activity is
detected.
2 BLINK_OFF - the LED is on when idle, and blinks off
every 10ms when activity is detected.

Note that the user must turn sw_activity OFF it they wish to
control the activity LED via the em_message file.


What: /sys/block/*/device/unload_heads
Date: Sep, 2008
KernelVersion: v2.6.28
Contact: linux-ide@vger.kernel.org
Description:
(RW) Hard disk shock protection

Writing an integer value to this file will take the heads of the
respective drive off the platter and block all I/O operations
for the specified number of milliseconds.

- If the device does not support the unload heads feature,
access is denied with -EOPNOTSUPP.
- The maximal value accepted for a timeout is 30000
milliseconds.
- A previously set timeout can be cancelled and disk can resume
normal operation immediately by specifying a timeout of 0.
- Some hard drives only comply with an earlier version of the
ATA standard, but support the unload feature nonetheless.
There is no safe way Linux can detect these devices, so this
is not enabled by default. If it is known that your device
does support the unload feature, then you can tell the kernel
to enable it by writing -1. It can be disabled again by
writing -2.
- Values below -2 are rejected with -EINVAL

For more information, see
Documentation/laptops/disk-shock-protection.txt


What: /sys/block/*/device/ncq_prio_enable
Date: Oct, 2016
KernelVersion: v4.10
Contact: linux-ide@vger.kernel.org
Description:
(RW) Write to the file to turn on or off the SATA ncq (native
command queueing) support. By default this feature is turned
off.
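For illustration only (not part of this change): a minimal sketch of using the
unload_heads attribute described above from userspace. "sda" is a hypothetical
device name, and root privileges plus a drive that supports head unloading are
assumed.

    /* Park the heads of sda for two seconds via sysfs. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/block/sda/device/unload_heads", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fprintf(f, "2000\n");   /* milliseconds; 0 cancels, 30000 is the maximum */
            if (fclose(f) != 0) {   /* the write may only be reported at flush time */
                    perror("unload_heads");
                    return 1;
            }
            return 0;
    }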
@ -27,3 +27,92 @@ Description: This file contains the current status of the "SSD Smart Path"
the direct i/o path to physical devices. This setting is
controller wide, affecting all configured logical drives on the
controller. This file is readable and writable.

What: /sys/class/scsi_host/hostX/link_power_management_policy
Date: Oct, 2007
KernelVersion: v2.6.24
Contact: linux-ide@vger.kernel.org
Description:
(RW) This parameter allows the user to read and set the link
(interface) power management.

There are four possible options:

min_power: Tell the controller to try to make the link use the
least possible power when possible. This may sacrifice some
performance due to increased latency when coming out of lower
power states.

max_performance: Generally, this means no power management.
Tell the controller to have performance be a priority over power
management.

medium_power: Tell the controller to enter a lower power state
when possible, but do not enter the lowest power state, thus
improving latency over min_power setting.

med_power_with_dipm: Identical to the existing medium_power
setting except that it enables dipm (device initiated power
management) on top, which makes it match the Windows IRST (Intel
Rapid Storage Technology) driver settings. This setting is also
close to min_power, except that:
a) It does not use host-initiated slumber mode, but it does
allow device-initiated slumber
b) It does not enable low power device sleep mode (DevSlp).

What: /sys/class/scsi_host/hostX/em_message
What: /sys/class/scsi_host/hostX/em_message_type
Date: Jun, 2008
KernelVersion: v2.6.27
Contact: linux-ide@vger.kernel.org
Description:
em_message: (RW) Enclosure management support. For the LED
protocol, writes and reads correspond to the LED message format
as defined in the AHCI spec.

The user must turn sw_activity (under /sys/block/*/device/) OFF
it they wish to control the activity LED via the em_message
file.

em_message_type: (RO) Displays the current enclosure management
protocol that is being used by the driver (for eg. LED, SAF-TE,
SES-2, SGPIO etc).

What: /sys/class/scsi_host/hostX/ahci_port_cmd
What: /sys/class/scsi_host/hostX/ahci_host_caps
What: /sys/class/scsi_host/hostX/ahci_host_cap2
Date: Mar, 2010
KernelVersion: v2.6.35
Contact: linux-ide@vger.kernel.org
Description:
[to be documented]

What: /sys/class/scsi_host/hostX/ahci_host_version
Date: Mar, 2010
KernelVersion: v2.6.35
Contact: linux-ide@vger.kernel.org
Description:
(RO) Display the version of the AHCI spec implemented by the
host.

What: /sys/class/scsi_host/hostX/em_buffer
Date: Apr, 2010
KernelVersion: v2.6.35
Contact: linux-ide@vger.kernel.org
Description:
(RW) Allows access to AHCI EM (enclosure management) buffer
directly if the host supports EM.

For eg. the AHCI driver supports SGPIO EM messages but the
SATA/AHCI specs do not define the SGPIO message format of the EM
buffer. Different hardware(HW) vendors may have different
definitions. With the em_buffer attribute, this issue can be
solved by allowing HW vendors to provide userland drivers and
tools for their SGPIO initiators.

What: /sys/class/scsi_host/hostX/em_message_supported
Date: Oct, 2009
KernelVersion: v2.6.39
Contact: linux-ide@vger.kernel.org
Description:
(RO) Displays supported enclosure management message types.
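For illustration only (not part of this change): a minimal sketch that selects
the med_power_with_dipm policy documented above. "host0" is a hypothetical
host name, and root privileges plus a libata/AHCI host that supports link
power management are assumed.

    /* Set the SATA link power management policy via sysfs. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/scsi_host/host0/link_power_management_policy", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fprintf(f, "med_power_with_dipm\n");
            if (fclose(f) != 0) {
                    perror("link_power_management_policy");
                    return 1;
            }
            return 0;
    }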
@ -16,6 +16,7 @@ Required properties:
- ddc: phandle to the hdmi ddc node
- phy: phandle to the hdmi phy node
- samsung,syscon-phandle: phandle for system controller node for PMU.
- #sound-dai-cells: should be 0.

Required properties for Exynos 4210, 4212, 5420 and 5433:
- clocks: list of clock IDs from SoC clock driver.
@ -3,11 +3,11 @@ Device-Tree bindings for sigma delta modulator
Required properties:
- compatible: should be "ads1201", "sd-modulator". "sd-modulator" can be use
as a generic SD modulator if modulator not specified in compatible list.
- #io-channel-cells = <1>: See the IIO bindings section "IIO consumers".
- #io-channel-cells = <0>: See the IIO bindings section "IIO consumers".

Example node:

ads1202: adc@0 {
compatible = "sd-modulator";
#io-channel-cells = <1>;
#io-channel-cells = <0>;
};
@ -50,14 +50,15 @@ Example:
|
|||
compatible = "marvell,mv88e6085";
|
||||
reg = <0>;
|
||||
reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
|
||||
};
|
||||
mdio {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
switch1phy0: switch1phy0@0 {
|
||||
reg = <0>;
|
||||
interrupt-parent = <&switch0>;
|
||||
interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
|
||||
|
||||
mdio {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
switch1phy0: switch1phy0@0 {
|
||||
reg = <0>;
|
||||
interrupt-parent = <&switch0>;
|
||||
interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -74,23 +75,24 @@ Example:
|
|||
compatible = "marvell,mv88e6390";
|
||||
reg = <0>;
|
||||
reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
|
||||
};
|
||||
mdio {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
switch1phy0: switch1phy0@0 {
|
||||
reg = <0>;
|
||||
interrupt-parent = <&switch0>;
|
||||
interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
|
||||
};
|
||||
};
|
||||
|
||||
mdio1 {
|
||||
compatible = "marvell,mv88e6xxx-mdio-external";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
switch1phy9: switch1phy0@9 {
|
||||
reg = <9>;
|
||||
mdio {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
switch1phy0: switch1phy0@0 {
|
||||
reg = <0>;
|
||||
interrupt-parent = <&switch0>;
|
||||
interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
|
||||
};
|
||||
};
|
||||
|
||||
mdio1 {
|
||||
compatible = "marvell,mv88e6xxx-mdio-external";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
switch1phy9: switch1phy0@9 {
|
||||
reg = <9>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
@ -27,7 +27,11 @@ Required properties:
SoC-specific version corresponding to the platform first followed by
the generic version.

- reg: offset and length of (1) the register block and (2) the stream buffer.
- reg: Offset and length of (1) the register block and (2) the stream buffer.
The region for the register block is mandatory.
The region for the stream buffer is optional, as it is only present on
R-Car Gen2 and RZ/G1 SoCs, and on R-Car H3 (R8A7795), M3-W (R8A7796),
and M3-N (R8A77965).
- interrupts: A list of interrupt-specifiers, one for each entry in
interrupt-names.
If interrupt-names is not present, an interrupt specifier
@ -19,7 +19,7 @@ Required properties:
configured in FS mode;
- "st,stm32f4x9-hsotg": The DWC2 USB HS controller instance in STM32F4x9 SoCs
configured in HS mode;
- "st,stm32f7xx-hsotg": The DWC2 USB HS controller instance in STM32F7xx SoCs
- "st,stm32f7-hsotg": The DWC2 USB HS controller instance in STM32F7 SoCs
configured in HS mode;
- reg : Should contain 1 register range (address and length)
- interrupts : Should contain 1 interrupt
@ -4,6 +4,7 @@ Required properties:
- compatible: Must contain one of the following:
- "renesas,r8a7795-usb3-peri"
- "renesas,r8a7796-usb3-peri"
- "renesas,r8a77965-usb3-peri"
- "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 compatible
device
@ -12,6 +12,7 @@ Required properties:
- "renesas,usbhs-r8a7794" for r8a7794 (R-Car E2) compatible device
- "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
- "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
- "renesas,usbhs-r8a77965" for r8a77965 (R-Car M3-N) compatible device
- "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device
- "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device
- "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices
@ -13,6 +13,7 @@ Required properties:
- "renesas,xhci-r8a7793" for r8a7793 SoC
- "renesas,xhci-r8a7795" for r8a7795 SoC
- "renesas,xhci-r8a7796" for r8a7796 SoC
- "renesas,xhci-r8a77965" for r8a77965 SoC
- "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible
device
- "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device
@ -20,8 +20,8 @@ TCP Segmentation Offload

TCP segmentation allows a device to segment a single frame into multiple
frames with a data payload size specified in skb_shinfo()->gso_size.
When TCP segmentation requested the bit for either SKB_GSO_TCP or
SKB_GSO_TCP6 should be set in skb_shinfo()->gso_type and
When TCP segmentation requested the bit for either SKB_GSO_TCPV4 or
SKB_GSO_TCPV6 should be set in skb_shinfo()->gso_type and
skb_shinfo()->gso_size should be set to a non-zero value.

TCP segmentation is dependent on support for the use of partial checksum

@ -153,8 +153,18 @@ To signal this, gso_size is set to the special value GSO_BY_FRAGS.

Therefore, any code in the core networking stack must be aware of the
possibility that gso_size will be GSO_BY_FRAGS and handle that case
appropriately. (For size checks, the skb_gso_validate_*_len family of
helpers do this automatically.)
appropriately.

There are some helpers to make this easier:

- skb_is_gso(skb) && skb_is_gso_sctp(skb) is the best way to see if
an skb is an SCTP GSO skb.

- For size checks, the skb_gso_validate_*_len family of helpers correctly
considers GSO_BY_FRAGS.

- For manipulating packets, skb_increase_gso_size and skb_decrease_gso_size
will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.

This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
 MAINTAINERS | 50
|
@ -1060,41 +1060,42 @@ ARM PORT
|
|||
M: Russell King <linux@armlinux.org.uk>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
W: http://www.armlinux.org.uk/
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
T: git git://git.armlinux.org.uk/~rmk/linux-arm.git
|
||||
F: arch/arm/
|
||||
X: arch/arm/boot/dts/
|
||||
|
||||
ARM PRIMECELL AACI PL041 DRIVER
|
||||
M: Russell King <linux@armlinux.org.uk>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: sound/arm/aaci.*
|
||||
|
||||
ARM PRIMECELL BUS SUPPORT
|
||||
M: Russell King <linux@armlinux.org.uk>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: drivers/amba/
|
||||
F: include/linux/amba/bus.h
|
||||
|
||||
ARM PRIMECELL CLCD PL110 DRIVER
|
||||
M: Russell King <linux@armlinux.org.uk>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: drivers/video/fbdev/amba-clcd.*
|
||||
|
||||
ARM PRIMECELL KMI PL050 DRIVER
|
||||
M: Russell King <linux@armlinux.org.uk>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: drivers/input/serio/ambakmi.*
|
||||
F: include/linux/amba/kmi.h
|
||||
|
||||
ARM PRIMECELL MMCI PL180/1 DRIVER
|
||||
M: Russell King <linux@armlinux.org.uk>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: drivers/mmc/host/mmci.*
|
||||
F: include/linux/amba/mmci.h
|
||||
|
||||
ARM PRIMECELL UART PL010 AND PL011 DRIVERS
|
||||
M: Russell King <linux@armlinux.org.uk>
|
||||
S: Maintained
|
||||
S: Odd Fixes
|
||||
F: drivers/tty/serial/amba-pl01*.c
|
||||
F: include/linux/amba/serial.h
|
||||
|
||||
|
@ -1152,7 +1153,7 @@ S: Maintained
|
|||
F: drivers/clk/sunxi/
|
||||
|
||||
ARM/Allwinner sunXi SoC support
|
||||
M: Maxime Ripard <maxime.ripard@free-electrons.com>
|
||||
M: Maxime Ripard <maxime.ripard@bootlin.com>
|
||||
M: Chen-Yu Tsai <wens@csie.org>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
|
@ -4626,7 +4627,7 @@ F: include/uapi/drm/drm*
|
|||
F: include/linux/vga*
|
||||
|
||||
DRM DRIVERS FOR ALLWINNER A10
|
||||
M: Maxime Ripard <maxime.ripard@free-electrons.com>
|
||||
M: Maxime Ripard <maxime.ripard@bootlin.com>
|
||||
L: dri-devel@lists.freedesktop.org
|
||||
S: Supported
|
||||
F: drivers/gpu/drm/sun4i/
|
||||
|
@ -8452,7 +8453,7 @@ S: Orphan
|
|||
F: drivers/net/wireless/marvell/libertas/
|
||||
|
||||
MARVELL MACCHIATOBIN SUPPORT
|
||||
M: Russell King <rmk@armlinux.org.uk>
|
||||
M: Russell King <linux@armlinux.org.uk>
|
||||
L: linux-arm-kernel@lists.infradead.org
|
||||
S: Maintained
|
||||
F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
|
||||
|
@ -8465,7 +8466,7 @@ F: drivers/net/ethernet/marvell/mv643xx_eth.*
|
|||
F: include/linux/mv643xx.h
|
||||
|
||||
MARVELL MV88X3310 PHY DRIVER
|
||||
M: Russell King <rmk@armlinux.org.uk>
|
||||
M: Russell King <linux@armlinux.org.uk>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/net/phy/marvell10g.c
|
||||
|
@ -9943,6 +9944,13 @@ F: Documentation/ABI/stable/sysfs-bus-nvmem
|
|||
F: include/linux/nvmem-consumer.h
|
||||
F: include/linux/nvmem-provider.h
|
||||
|
||||
NXP SGTL5000 DRIVER
|
||||
M: Fabio Estevam <fabio.estevam@nxp.com>
|
||||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/sound/sgtl5000.txt
|
||||
F: sound/soc/codecs/sgtl5000*
|
||||
|
||||
NXP TDA998X DRM DRIVER
|
||||
M: Russell King <linux@armlinux.org.uk>
|
||||
S: Supported
|
||||
|
@ -10345,7 +10353,7 @@ F: drivers/oprofile/
|
|||
F: include/linux/oprofile.h
|
||||
|
||||
ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
|
||||
M: Mark Fasheh <mfasheh@versity.com>
|
||||
M: Mark Fasheh <mark@fasheh.com>
|
||||
M: Joel Becker <jlbec@evilplan.org>
|
||||
L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
|
||||
W: http://ocfs2.wiki.kernel.org
|
||||
|
@ -10855,6 +10863,7 @@ F: drivers/platform/x86/peaq-wmi.c
|
|||
PER-CPU MEMORY ALLOCATOR
|
||||
M: Tejun Heo <tj@kernel.org>
|
||||
M: Christoph Lameter <cl@linux.com>
|
||||
M: Dennis Zhou <dennisszhou@gmail.com>
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
|
||||
S: Maintained
|
||||
F: include/linux/percpu*.h
|
||||
|
@ -12125,6 +12134,7 @@ M: Sylwester Nawrocki <s.nawrocki@samsung.com>
|
|||
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
|
||||
S: Supported
|
||||
F: sound/soc/samsung/
|
||||
F: Documentation/devicetree/bindings/sound/samsung*
|
||||
|
||||
SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
|
||||
M: Krzysztof Kozlowski <krzk@kernel.org>
|
||||
|
@ -12884,6 +12894,19 @@ S: Maintained
|
|||
F: drivers/net/ethernet/socionext/netsec.c
|
||||
F: Documentation/devicetree/bindings/net/socionext-netsec.txt
|
||||
|
||||
SOLIDRUN CLEARFOG SUPPORT
|
||||
M: Russell King <linux@armlinux.org.uk>
|
||||
S: Maintained
|
||||
F: arch/arm/boot/dts/armada-388-clearfog*
|
||||
F: arch/arm/boot/dts/armada-38x-solidrun-*
|
||||
|
||||
SOLIDRUN CUBOX-I/HUMMINGBOARD SUPPORT
|
||||
M: Russell King <linux@armlinux.org.uk>
|
||||
S: Maintained
|
||||
F: arch/arm/boot/dts/imx6*-cubox-i*
|
||||
F: arch/arm/boot/dts/imx6*-hummingboard*
|
||||
F: arch/arm/boot/dts/imx6*-sr-*
|
||||
|
||||
SONIC NETWORK DRIVER
|
||||
M: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
|
||||
L: netdev@vger.kernel.org
|
||||
|
@ -13653,7 +13676,8 @@ S: Supported
|
|||
F: drivers/i2c/busses/i2c-tegra.c
|
||||
|
||||
TEGRA IOMMU DRIVERS
|
||||
M: Hiroshi Doyu <hdoyu@nvidia.com>
|
||||
M: Thierry Reding <thierry.reding@gmail.com>
|
||||
L: linux-tegra@vger.kernel.org
|
||||
S: Supported
|
||||
F: drivers/iommu/tegra*
|
||||
|
||||
|
|
 Makefile | 11
@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Fearless Coyote

# *DOCUMENTATION*

@ -826,6 +826,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)

# clang sets -fmerge-all-constants by default as optimization, but this
# is non-conforming behavior for C and in fact breaks the kernel, so we
# need to disable it here generally.
KBUILD_CFLAGS += $(call cc-option,-fno-merge-all-constants)

# for gcc -fno-merge-all-constants disables everything, but it is fine
# to have actual conforming behavior enabled.
KBUILD_CFLAGS += $(call cc-option,-fmerge-constants)

# Make sure -fstack-check isn't enabled (like gentoo apparently did)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
@ -22,6 +22,7 @@ config ARM_PTDUMP_DEBUGFS
|
|||
|
||||
config DEBUG_WX
|
||||
bool "Warn on W+X mappings at boot"
|
||||
depends on MMU
|
||||
select ARM_PTDUMP_CORE
|
||||
---help---
|
||||
Generate a warning if any W+X mappings are found at boot.
|
||||
|
|
|
@ -30,7 +30,7 @@ esac
|
|||
|
||||
sym_val() {
|
||||
# extract hex value for symbol in $1
|
||||
local val=$($NM "$VMLINUX" | sed -n "/ $1$/{s/ .*$//p;q}")
|
||||
local val=$($NM "$VMLINUX" 2>/dev/null | sed -n "/ $1\$/{s/ .*$//p;q}")
|
||||
[ "$val" ] || { echo "can't find $1 in $VMLINUX" 1>&2; exit 1; }
|
||||
# convert from hex to decimal
|
||||
echo $((0x$val))
|
||||
|
@ -48,12 +48,12 @@ data_end=$(($_edata_loc - $base_offset))
|
|||
file_end=$(stat -c "%s" "$XIPIMAGE")
|
||||
if [ "$file_end" != "$data_end" ]; then
|
||||
printf "end of xipImage doesn't match with _edata_loc (%#x vs %#x)\n" \
|
||||
$(($file_end + $base_offset)) $_edata_loc 2>&1
|
||||
$(($file_end + $base_offset)) $_edata_loc 1>&2
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
# be ready to clean up
|
||||
trap 'rm -f "$XIPIMAGE.tmp"' 0 1 2 3
|
||||
trap 'rm -f "$XIPIMAGE.tmp"; exit 1' 1 2 3
|
||||
|
||||
# substitute the data section by a compressed version
|
||||
$DD if="$XIPIMAGE" count=$data_start iflag=count_bytes of="$XIPIMAGE.tmp"
|
||||
|
|
|
@ -42,6 +42,11 @@ cpu@0 {
|
|||
};
|
||||
};
|
||||
|
||||
memory@40000000 {
|
||||
device_type = "memory";
|
||||
reg = <0x40000000 0>;
|
||||
};
|
||||
|
||||
ahb {
|
||||
compatible = "simple-bus";
|
||||
#address-cells = <1>;
|
||||
|
|
|
@ -42,6 +42,11 @@ cpu@0 {
|
|||
};
|
||||
};
|
||||
|
||||
memory@80000000 {
|
||||
device_type = "memory";
|
||||
reg = <0x80000000 0>;
|
||||
};
|
||||
|
||||
ahb {
|
||||
compatible = "simple-bus";
|
||||
#address-cells = <1>;
|
||||
|
|
|
@ -82,7 +82,7 @@ reg_usb_otg1_vbus: regulator-usb-otg1-vbus {
|
|||
enable-active-high;
|
||||
};
|
||||
|
||||
reg_usb_otg2_vbus: regulator-usb-otg1-vbus {
|
||||
reg_usb_otg2_vbus: regulator-usb-otg2-vbus {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "usb_otg2_vbus";
|
||||
regulator-min-microvolt = <5000000>;
|
||||
|
|
|
@ -927,6 +927,7 @@ spdif: sound@ff88b0000 {
|
|||
i2s: i2s@ff890000 {
|
||||
compatible = "rockchip,rk3288-i2s", "rockchip,rk3066-i2s";
|
||||
reg = <0x0 0xff890000 0x0 0x10000>;
|
||||
#sound-dai-cells = <0>;
|
||||
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
@ -1176,6 +1177,7 @@ hdmi: hdmi@ff980000 {
|
|||
compatible = "rockchip,rk3288-dw-hdmi";
|
||||
reg = <0x0 0xff980000 0x0 0x20000>;
|
||||
reg-io-width = <4>;
|
||||
#sound-dai-cells = <0>;
|
||||
rockchip,grf = <&grf>;
|
||||
interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>, <&cru SCLK_HDMI_CEC>;
|
||||
|
|
|
@ -42,7 +42,6 @@
|
|||
|
||||
/dts-v1/;
|
||||
#include "sun6i-a31s.dtsi"
|
||||
#include "sunxi-common-regulators.dtsi"
|
||||
#include <dt-bindings/gpio/gpio.h>
|
||||
|
||||
/ {
|
||||
|
@ -99,6 +98,7 @@ &gmac {
|
|||
pinctrl-0 = <&gmac_pins_rgmii_a>, <&gmac_phy_reset_pin_bpi_m2>;
|
||||
phy = <&phy1>;
|
||||
phy-mode = "rgmii";
|
||||
phy-supply = <®_dldo1>;
|
||||
snps,reset-gpio = <&pio 0 21 GPIO_ACTIVE_HIGH>; /* PA21 */
|
||||
snps,reset-active-low;
|
||||
snps,reset-delays-us = <0 10000 30000>;
|
||||
|
@ -118,7 +118,7 @@ &ir {
|
|||
&mmc0 {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_bpi_m2>;
|
||||
vmmc-supply = <®_vcc3v0>;
|
||||
vmmc-supply = <®_dcdc1>;
|
||||
bus-width = <4>;
|
||||
cd-gpios = <&pio 0 4 GPIO_ACTIVE_HIGH>; /* PA4 */
|
||||
cd-inverted;
|
||||
|
@ -132,7 +132,7 @@ &mmc0_pins_a {
|
|||
&mmc2 {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&mmc2_pins_a>;
|
||||
vmmc-supply = <®_vcc3v0>;
|
||||
vmmc-supply = <®_aldo1>;
|
||||
mmc-pwrseq = <&mmc2_pwrseq>;
|
||||
bus-width = <4>;
|
||||
non-removable;
|
||||
|
@ -163,6 +163,8 @@ axp22x: pmic@68 {
|
|||
reg = <0x68>;
|
||||
interrupt-parent = <&nmi_intc>;
|
||||
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
|
||||
eldoin-supply = <®_dcdc1>;
|
||||
x-powers,drive-vbus-en;
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -193,7 +195,28 @@ mmc2_pwrseq_pin_bpi_m2: mmc2_pwrseq_pin@0 {
|
|||
|
||||
#include "axp22x.dtsi"
|
||||
|
||||
®_aldo1 {
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
regulator-name = "vcc-wifi";
|
||||
};
|
||||
|
||||
®_aldo2 {
|
||||
regulator-always-on;
|
||||
regulator-min-microvolt = <2500000>;
|
||||
regulator-max-microvolt = <2500000>;
|
||||
regulator-name = "vcc-gmac";
|
||||
};
|
||||
|
||||
®_aldo3 {
|
||||
regulator-always-on;
|
||||
regulator-min-microvolt = <3000000>;
|
||||
regulator-max-microvolt = <3000000>;
|
||||
regulator-name = "avcc";
|
||||
};
|
||||
|
||||
®_dc5ldo {
|
||||
regulator-always-on;
|
||||
regulator-min-microvolt = <700000>;
|
||||
regulator-max-microvolt = <1320000>;
|
||||
regulator-name = "vdd-cpus";
|
||||
|
@ -233,6 +256,40 @@ ®_dcdc5 {
|
|||
regulator-name = "vcc-dram";
|
||||
};
|
||||
|
||||
®_dldo1 {
|
||||
regulator-min-microvolt = <3000000>;
|
||||
regulator-max-microvolt = <3000000>;
|
||||
regulator-name = "vcc-mac";
|
||||
};
|
||||
|
||||
®_dldo2 {
|
||||
regulator-min-microvolt = <2800000>;
|
||||
regulator-max-microvolt = <2800000>;
|
||||
regulator-name = "avdd-csi";
|
||||
};
|
||||
|
||||
®_dldo3 {
|
||||
regulator-always-on;
|
||||
regulator-min-microvolt = <3300000>;
|
||||
regulator-max-microvolt = <3300000>;
|
||||
regulator-name = "vcc-pb";
|
||||
};
|
||||
|
||||
®_eldo1 {
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
regulator-name = "vdd-csi";
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
®_ldo_io1 {
|
||||
regulator-always-on;
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
regulator-name = "vcc-pm-cpus";
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
&uart0 {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&uart0_pins_a>;
|
||||
|
|
|
@ -12,8 +12,6 @@ struct mm_struct;
|
|||
|
||||
void arm_install_vdso(struct mm_struct *mm, unsigned long addr);
|
||||
|
||||
extern char vdso_start, vdso_end;
|
||||
|
||||
extern unsigned int vdso_total_pages;
|
||||
|
||||
#else /* CONFIG_VDSO */
|
||||
|
|
|
@ -39,6 +39,8 @@
|
|||
|
||||
static struct page **vdso_text_pagelist;
|
||||
|
||||
extern char vdso_start[], vdso_end[];
|
||||
|
||||
/* Total number of pages needed for the data and text portions of the VDSO. */
|
||||
unsigned int vdso_total_pages __ro_after_init;
|
||||
|
||||
|
@ -197,13 +199,13 @@ static int __init vdso_init(void)
|
|||
unsigned int text_pages;
|
||||
int i;
|
||||
|
||||
if (memcmp(&vdso_start, "\177ELF", 4)) {
|
||||
if (memcmp(vdso_start, "\177ELF", 4)) {
|
||||
pr_err("VDSO is not a valid ELF object!\n");
|
||||
return -ENOEXEC;
|
||||
}
|
||||
|
||||
text_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
|
||||
pr_debug("vdso: %i text pages at base %p\n", text_pages, &vdso_start);
|
||||
text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
|
||||
pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
|
||||
|
||||
/* Allocate the VDSO text pagelist */
|
||||
vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
|
||||
|
@ -218,7 +220,7 @@ static int __init vdso_init(void)
|
|||
for (i = 0; i < text_pages; i++) {
|
||||
struct page *page;
|
||||
|
||||
page = virt_to_page(&vdso_start + i * PAGE_SIZE);
|
||||
page = virt_to_page(vdso_start + i * PAGE_SIZE);
|
||||
vdso_text_pagelist[i] = page;
|
||||
}
|
||||
|
||||
|
@ -229,7 +231,7 @@ static int __init vdso_init(void)
|
|||
|
||||
cntvct_ok = cntvct_functional();
|
||||
|
||||
patch_vdso(&vdso_start);
|
||||
patch_vdso(vdso_start);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -127,8 +127,8 @@ static struct gpiod_lookup_table mmc_gpios_table = {
|
|||
.dev_id = "da830-mmc.0",
|
||||
.table = {
|
||||
/* CD: gpio3_12: gpio60: chip 1 contains gpio range 32-63*/
|
||||
GPIO_LOOKUP("davinci_gpio.1", 28, "cd", GPIO_ACTIVE_LOW),
|
||||
GPIO_LOOKUP("davinci_gpio.1", 29, "wp", GPIO_ACTIVE_LOW),
|
||||
GPIO_LOOKUP("davinci_gpio.0", 28, "cd", GPIO_ACTIVE_LOW),
|
||||
GPIO_LOOKUP("davinci_gpio.0", 29, "wp", GPIO_ACTIVE_LOW),
|
||||
},
|
||||
};
|
||||
|
||||
|
|
|
@ -133,6 +133,9 @@ static void __init u8500_init_machine(void)
|
|||
if (of_machine_is_compatible("st-ericsson,u8540"))
|
||||
of_platform_populate(NULL, u8500_local_bus_nodes,
|
||||
u8540_auxdata_lookup, NULL);
|
||||
else
|
||||
of_platform_populate(NULL, u8500_local_bus_nodes,
|
||||
NULL, NULL);
|
||||
}
|
||||
|
||||
static const char * stericsson_dt_platform_compat[] = {
|
||||
|
|
|
@ -888,11 +888,8 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
|
|||
timer->irq = irq->start;
|
||||
timer->pdev = pdev;
|
||||
|
||||
/* Skip pm_runtime_enable for OMAP1 */
|
||||
if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
|
||||
pm_runtime_enable(dev);
|
||||
pm_runtime_irq_safe(dev);
|
||||
}
|
||||
pm_runtime_enable(dev);
|
||||
pm_runtime_irq_safe(dev);
|
||||
|
||||
if (!timer->reserved) {
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
|
|
|
@ -5,13 +5,4 @@ void omap_map_sram(unsigned long start, unsigned long size,
|
|||
unsigned long skip, int cached);
|
||||
void omap_sram_reset(void);
|
||||
|
||||
extern void *omap_sram_push_address(unsigned long size);
|
||||
|
||||
/* Macro to push a function to the internal SRAM, using the fncpy API */
|
||||
#define omap_sram_push(funcp, size) ({ \
|
||||
typeof(&(funcp)) _res = NULL; \
|
||||
void *_sram_address = omap_sram_push_address(size); \
|
||||
if (_sram_address) \
|
||||
_res = fncpy(_sram_address, &(funcp), size); \
|
||||
_res; \
|
||||
})
|
||||
extern void *omap_sram_push(void *funcp, unsigned long size);
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <asm/fncpy.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/set_memory.h>
|
||||
|
||||
#include <asm/mach/map.h>
|
||||
|
||||
|
@ -42,7 +43,7 @@ static void __iomem *omap_sram_ceil;
|
|||
* Note that fncpy requires the returned address to be aligned
|
||||
* to an 8-byte boundary.
|
||||
*/
|
||||
void *omap_sram_push_address(unsigned long size)
|
||||
static void *omap_sram_push_address(unsigned long size)
|
||||
{
|
||||
unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;
|
||||
|
||||
|
@ -60,6 +61,30 @@ void *omap_sram_push_address(unsigned long size)
|
|||
return (void *)omap_sram_ceil;
|
||||
}
|
||||
|
||||
void *omap_sram_push(void *funcp, unsigned long size)
|
||||
{
|
||||
void *sram;
|
||||
unsigned long base;
|
||||
int pages;
|
||||
void *dst = NULL;
|
||||
|
||||
sram = omap_sram_push_address(size);
|
||||
if (!sram)
|
||||
return NULL;
|
||||
|
||||
base = (unsigned long)sram & PAGE_MASK;
|
||||
pages = PAGE_ALIGN(size) / PAGE_SIZE;
|
||||
|
||||
set_memory_rw(base, pages);
|
||||
|
||||
dst = fncpy(sram, funcp, size);
|
||||
|
||||
set_memory_ro(base, pages);
|
||||
set_memory_x(base, pages);
|
||||
|
||||
return dst;
|
||||
}
|
||||
|
||||
/*
|
||||
* The SRAM context is lost during off-idle and stack
|
||||
* needs to be reset.
|
||||
|
@ -75,6 +100,9 @@ void omap_sram_reset(void)
|
|||
void __init omap_map_sram(unsigned long start, unsigned long size,
|
||||
unsigned long skip, int cached)
|
||||
{
|
||||
unsigned long base;
|
||||
int pages;
|
||||
|
||||
if (size == 0)
|
||||
return;
|
||||
|
||||
|
@ -95,4 +123,10 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
|
|||
*/
|
||||
memset_io(omap_sram_base + omap_sram_skip, 0,
|
||||
omap_sram_size - omap_sram_skip);
|
||||
|
||||
base = (unsigned long)omap_sram_base;
|
||||
pages = PAGE_ALIGN(omap_sram_size) / PAGE_SIZE;
|
||||
|
||||
set_memory_ro(base, pages);
|
||||
set_memory_x(base, pages);
|
||||
}
|
||||
|
|
|
@ -648,7 +648,7 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
|
|||
*/
|
||||
static int vfp_dying_cpu(unsigned int cpu)
|
||||
{
|
||||
vfp_force_reload(cpu, current_thread_info());
|
||||
vfp_current_hw_state[cpu] = NULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -406,8 +406,9 @@ pp1800_pcie: pp1800-pcie {
|
|||
wlan_pd_n: wlan-pd-n {
|
||||
compatible = "regulator-fixed";
|
||||
regulator-name = "wlan_pd_n";
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&wlan_module_reset_l>;
|
||||
|
||||
/* Note the wlan_module_reset_l pinctrl */
|
||||
enable-active-high;
|
||||
gpio = <&gpio1 11 GPIO_ACTIVE_HIGH>;
|
||||
|
||||
|
@ -983,12 +984,6 @@ &pinctrl {
|
|||
pinctrl-0 = <
|
||||
&ap_pwroff /* AP will auto-assert this when in S3 */
|
||||
&clk_32k /* This pin is always 32k on gru boards */
|
||||
|
||||
/*
|
||||
* We want this driven low ASAP; firmware should help us, but
|
||||
* we can help ourselves too.
|
||||
*/
|
||||
&wlan_module_reset_l
|
||||
>;
|
||||
|
||||
pcfg_output_low: pcfg-output-low {
|
||||
|
@ -1168,12 +1163,7 @@ wifi_perst_l: wifi-perst-l {
|
|||
};
|
||||
|
||||
wlan_module_reset_l: wlan-module-reset-l {
|
||||
/*
|
||||
* We want this driven low ASAP (As {Soon,Strongly} As
|
||||
* Possible), to avoid leakage through the powered-down
|
||||
* WiFi.
|
||||
*/
|
||||
rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_output_low>;
|
||||
rockchip,pins = <1 11 RK_FUNC_GPIO &pcfg_pull_none>;
|
||||
};
|
||||
|
||||
bt_host_wake_l: bt-host-wake-l {
|
||||
|
|
|
@ -411,8 +411,8 @@ usbdrd_dwc3_0: dwc3 {
|
|||
reg = <0x0 0xfe800000 0x0 0x100000>;
|
||||
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH 0>;
|
||||
dr_mode = "otg";
|
||||
phys = <&u2phy0_otg>, <&tcphy0_usb3>;
|
||||
phy-names = "usb2-phy", "usb3-phy";
|
||||
phys = <&u2phy0_otg>;
|
||||
phy-names = "usb2-phy";
|
||||
phy_type = "utmi_wide";
|
||||
snps,dis_enblslpm_quirk;
|
||||
snps,dis-u2-freeclk-exists-quirk;
|
||||
|
@ -444,8 +444,8 @@ usbdrd_dwc3_1: dwc3 {
|
|||
reg = <0x0 0xfe900000 0x0 0x100000>;
|
||||
interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH 0>;
|
||||
dr_mode = "otg";
|
||||
phys = <&u2phy1_otg>, <&tcphy1_usb3>;
|
||||
phy-names = "usb2-phy", "usb3-phy";
|
||||
phys = <&u2phy1_otg>;
|
||||
phy-names = "usb2-phy";
|
||||
phy_type = "utmi_wide";
|
||||
snps,dis_enblslpm_quirk;
|
||||
snps,dis-u2-freeclk-exists-quirk;
|
||||
|
|
|
@ -363,8 +363,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
|
|||
{
|
||||
int ret = 0;
|
||||
|
||||
vcpu_load(vcpu);
|
||||
|
||||
trace_kvm_set_guest_debug(vcpu, dbg->control);
|
||||
|
||||
if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
|
||||
|
@ -386,7 +384,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
|
|||
}
|
||||
|
||||
out:
|
||||
vcpu_put(vcpu);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -972,3 +972,13 @@ int pmd_clear_huge(pmd_t *pmdp)
|
|||
pmd_clear(pmdp);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int pud_free_pmd_page(pud_t *pud)
|
||||
{
|
||||
return pud_none(*pud);
|
||||
}
|
||||
|
||||
int pmd_free_pte_page(pmd_t *pmd)
|
||||
{
|
||||
return pmd_none(*pmd);
|
||||
}
|
||||
|
|
|
@ -2,7 +2,6 @@
|
|||
#ifndef __H8300_BYTEORDER_H__
|
||||
#define __H8300_BYTEORDER_H__
|
||||
|
||||
#define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
|
||||
#include <linux/byteorder/big_endian.h>
|
||||
|
||||
#endif
|
||||
|
|
|
@ -24,6 +24,7 @@ config MICROBLAZE
|
|||
select HAVE_FTRACE_MCOUNT_RECORD
|
||||
select HAVE_FUNCTION_GRAPH_TRACER
|
||||
select HAVE_FUNCTION_TRACER
|
||||
select NO_BOOTMEM
|
||||
select HAVE_MEMBLOCK
|
||||
select HAVE_MEMBLOCK_NODE_MAP
|
||||
select HAVE_OPROFILE
|
||||
|
|
|
@ -8,7 +8,6 @@ menu "Platform options"
|
|||
|
||||
config OPT_LIB_FUNCTION
|
||||
bool "Optimalized lib function"
|
||||
depends on CPU_LITTLE_ENDIAN
|
||||
default y
|
||||
help
|
||||
Allows turn on optimalized library function (memcpy and memmove).
|
||||
|
@ -21,6 +20,7 @@ config OPT_LIB_FUNCTION
|
|||
config OPT_LIB_ASM
|
||||
bool "Optimalized lib function ASM"
|
||||
depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
|
||||
depends on CPU_BIG_ENDIAN
|
||||
default n
|
||||
help
|
||||
Allows turn on optimalized library function (memcpy and memmove).
|
||||
|
|
|
@ -44,7 +44,6 @@ void machine_shutdown(void);
|
|||
void machine_halt(void);
|
||||
void machine_power_off(void);
|
||||
|
||||
extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
|
||||
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
|
||||
|
||||
# endif /* __ASSEMBLY__ */
|
||||
|
|
|
@ -29,10 +29,6 @@
|
|||
* between mem locations with size of xfer spec'd in bytes
|
||||
*/
|
||||
|
||||
#ifdef __MICROBLAZEEL__
|
||||
#error Microblaze LE not support ASM optimized lib func. Disable OPT_LIB_ASM.
|
||||
#endif
|
||||
|
||||
#include <linux/linkage.h>
|
||||
.text
|
||||
.globl memcpy
|
||||
|
|
|
@ -32,9 +32,6 @@ int mem_init_done;
|
|||
#ifndef CONFIG_MMU
|
||||
unsigned int __page_offset;
|
||||
EXPORT_SYMBOL(__page_offset);
|
||||
|
||||
#else
|
||||
static int init_bootmem_done;
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
char *klimit = _end;
|
||||
|
@ -117,7 +114,6 @@ static void __init paging_init(void)
|
|||
|
||||
void __init setup_memory(void)
|
||||
{
|
||||
unsigned long map_size;
|
||||
struct memblock_region *reg;
|
||||
|
||||
#ifndef CONFIG_MMU
|
||||
|
@ -174,17 +170,6 @@ void __init setup_memory(void)
|
|||
pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
|
||||
pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
|
||||
|
||||
/*
|
||||
* Find an area to use for the bootmem bitmap.
|
||||
* We look for the first area which is at least
|
||||
* 128kB in length (128kB is enough for a bitmap
|
||||
* for 4GB of memory, using 4kB pages), plus 1 page
|
||||
* (in case the address isn't page-aligned).
|
||||
*/
|
||||
map_size = init_bootmem_node(NODE_DATA(0),
|
||||
PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn);
|
||||
memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size);
|
||||
|
||||
/* Add active regions with valid PFNs */
|
||||
for_each_memblock(memory, reg) {
|
||||
unsigned long start_pfn, end_pfn;
|
||||
|
@ -196,32 +181,9 @@ void __init setup_memory(void)
|
|||
&memblock.memory, 0);
|
||||
}
|
||||
|
||||
/* free bootmem is whole main memory */
|
||||
free_bootmem_with_active_regions(0, max_low_pfn);
|
||||
|
||||
/* reserve allocate blocks */
|
||||
for_each_memblock(reserved, reg) {
|
||||
unsigned long top = reg->base + reg->size - 1;
|
||||
|
||||
pr_debug("reserved - 0x%08x-0x%08x, %lx, %lx\n",
|
||||
(u32) reg->base, (u32) reg->size, top,
|
||||
memory_start + lowmem_size - 1);
|
||||
|
||||
if (top <= (memory_start + lowmem_size - 1)) {
|
||||
reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
|
||||
} else if (reg->base < (memory_start + lowmem_size - 1)) {
|
||||
unsigned long trunc_size = memory_start + lowmem_size -
|
||||
reg->base;
|
||||
reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
|
||||
}
|
||||
}
|
||||
|
||||
/* XXX need to clip this if using highmem? */
|
||||
sparse_memory_present_with_active_regions(0);
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
init_bootmem_done = 1;
|
||||
#endif
|
||||
paging_init();
|
||||
}
|
||||
|
||||
|
@ -398,30 +360,16 @@ asmlinkage void __init mmu_init(void)
|
|||
/* This is only called until mem_init is done. */
|
||||
void __init *early_get_page(void)
|
||||
{
|
||||
void *p;
|
||||
if (init_bootmem_done) {
|
||||
p = alloc_bootmem_pages(PAGE_SIZE);
|
||||
} else {
|
||||
/*
|
||||
* Mem start + kernel_tlb -> here is limit
|
||||
* because of mem mapping from head.S
|
||||
*/
|
||||
p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
|
||||
memory_start + kernel_tlb));
|
||||
}
|
||||
return p;
|
||||
/*
|
||||
* Mem start + kernel_tlb -> here is limit
|
||||
* because of mem mapping from head.S
|
||||
*/
|
||||
return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
|
||||
memory_start + kernel_tlb));
|
||||
}
|
||||
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
void * __ref alloc_maybe_bootmem(size_t size, gfp_t mask)
|
||||
{
|
||||
if (mem_init_done)
|
||||
return kmalloc(size, mask);
|
||||
else
|
||||
return alloc_bootmem(size);
|
||||
}
|
||||
|
||||
void * __ref zalloc_maybe_bootmem(size_t size, gfp_t mask)
|
||||
{
|
||||
void *p;
|
||||
|
|
|
@ -13,6 +13,8 @@ choice
|
|||
config SOC_AMAZON_SE
|
||||
bool "Amazon SE"
|
||||
select SOC_TYPE_XWAY
|
||||
select MFD_SYSCON
|
||||
select MFD_CORE
|
||||
|
||||
config SOC_XWAY
|
||||
bool "XWAY"
|
||||
|
|
|
@ -549,9 +549,9 @@ void __init ltq_soc_init(void)
|
|||
clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
|
||||
ltq_ar9_fpi_hz(), CLOCK_250M);
|
||||
clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
|
||||
clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0);
|
||||
clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
|
||||
clkdev_add_pmu("1f203034.usb2-phy", "phy", 1, 0, PMU_USB1_P);
|
||||
clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1);
|
||||
clkdev_add_pmu("1e106000.usb", "otg", 1, 0, PMU_USB1 | PMU_AHBM);
|
||||
clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
|
||||
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
|
||||
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
|
||||
|
@ -560,7 +560,7 @@ void __init ltq_soc_init(void)
|
|||
} else {
|
||||
clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
|
||||
ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
|
||||
clkdev_add_pmu("1f203018.usb2-phy", "ctrl", 1, 0, PMU_USB0);
|
||||
clkdev_add_pmu("1e101000.usb", "otg", 1, 0, PMU_USB0 | PMU_AHBM);
|
||||
clkdev_add_pmu("1f203018.usb2-phy", "phy", 1, 0, PMU_USB0_P);
|
||||
clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
|
||||
clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
|
||||
|
|
|
@ -170,6 +170,28 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
|
|||
u32 n1;
|
||||
u32 rev;
|
||||
|
||||
/* Early detection of CMP support */
|
||||
mips_cm_probe();
|
||||
mips_cpc_probe();
|
||||
|
||||
if (mips_cps_numiocu(0)) {
|
||||
/*
|
||||
* mips_cm_probe() wipes out bootloader
|
||||
* config for CM regions and we have to configure them
|
||||
* again. This SoC cannot talk to pamlbus devices
|
||||
* witout proper iocu region set up.
|
||||
*
|
||||
* FIXME: it would be better to do this with values
|
||||
* from DT, but we need this very early because
|
||||
* without this we cannot talk to pretty much anything
|
||||
* including serial.
|
||||
*/
|
||||
write_gcr_reg0_base(MT7621_PALMBUS_BASE);
|
||||
write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
|
||||
CM_GCR_REGn_MASK_CMTGT_IOCU0);
|
||||
__sync();
|
||||
}
|
||||
|
||||
n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
|
||||
n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
|
||||
|
||||
|
@ -194,26 +216,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
|
|||
|
||||
rt2880_pinmux_data = mt7621_pinmux_data;
|
||||
|
||||
/* Early detection of CMP support */
|
||||
mips_cm_probe();
|
||||
mips_cpc_probe();
|
||||
|
||||
if (mips_cps_numiocu(0)) {
|
||||
/*
|
||||
* mips_cm_probe() wipes out bootloader
|
||||
* config for CM regions and we have to configure them
|
||||
* again. This SoC cannot talk to pamlbus devices
|
||||
* witout proper iocu region set up.
|
||||
*
|
||||
* FIXME: it would be better to do this with values
|
||||
* from DT, but we need this very early because
|
||||
* without this we cannot talk to pretty much anything
|
||||
* including serial.
|
||||
*/
|
||||
write_gcr_reg0_base(MT7621_PALMBUS_BASE);
|
||||
write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
|
||||
CM_GCR_REGn_MASK_CMTGT_IOCU0);
|
||||
}
|
||||
|
||||
if (!register_cps_smp_ops())
|
||||
return;
|
||||
|
|
|
@ -96,16 +96,9 @@ static void ralink_restart(char *command)
|
|||
unreachable();
|
||||
}
|
||||
|
||||
static void ralink_halt(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
unreachable();
|
||||
}
|
||||
|
||||
static int __init mips_reboot_setup(void)
|
||||
{
|
||||
_machine_restart = ralink_restart;
|
||||
_machine_halt = ralink_halt;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -543,7 +543,8 @@ void flush_cache_mm(struct mm_struct *mm)
|
|||
rp3440, etc. So, avoid it if the mm isn't too big. */
|
||||
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
|
||||
mm_total_size(mm) >= parisc_cache_flush_threshold) {
|
||||
flush_tlb_all();
|
||||
if (mm->context)
|
||||
flush_tlb_all();
|
||||
flush_cache_all();
|
||||
return;
|
||||
}
|
||||
|
@ -571,6 +572,8 @@ void flush_cache_mm(struct mm_struct *mm)
|
|||
pfn = pte_pfn(*ptep);
|
||||
if (!pfn_valid(pfn))
|
||||
continue;
|
||||
if (unlikely(mm->context))
|
||||
flush_tlb_page(vma, addr);
|
||||
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
|
||||
}
|
||||
}
|
||||
|
@ -579,26 +582,46 @@ void flush_cache_mm(struct mm_struct *mm)
|
|||
void flush_cache_range(struct vm_area_struct *vma,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
unsigned long addr;
|
||||
|
||||
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
|
||||
end - start >= parisc_cache_flush_threshold) {
|
||||
flush_tlb_range(vma, start, end);
|
||||
if (vma->vm_mm->context)
|
||||
flush_tlb_range(vma, start, end);
|
||||
flush_cache_all();
|
||||
return;
|
||||
}
|
||||
|
||||
flush_user_dcache_range_asm(start, end);
|
||||
if (vma->vm_flags & VM_EXEC)
|
||||
flush_user_icache_range_asm(start, end);
|
||||
flush_tlb_range(vma, start, end);
|
||||
if (vma->vm_mm->context == mfsp(3)) {
|
||||
flush_user_dcache_range_asm(start, end);
|
||||
if (vma->vm_flags & VM_EXEC)
|
||||
flush_user_icache_range_asm(start, end);
|
||||
flush_tlb_range(vma, start, end);
|
||||
return;
|
||||
}
|
||||
|
||||
pgd = vma->vm_mm->pgd;
|
||||
for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
|
||||
unsigned long pfn;
|
||||
pte_t *ptep = get_ptep(pgd, addr);
|
||||
if (!ptep)
|
||||
continue;
|
||||
pfn = pte_pfn(*ptep);
|
||||
if (pfn_valid(pfn)) {
|
||||
if (unlikely(vma->vm_mm->context))
|
||||
flush_tlb_page(vma, addr);
|
||||
__flush_cache_page(vma, addr, PFN_PHYS(pfn));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
|
||||
{
|
||||
BUG_ON(!vma->vm_mm->context);
|
||||
|
||||
if (pfn_valid(pfn)) {
|
||||
flush_tlb_page(vma, vmaddr);
|
||||
if (likely(vma->vm_mm->context))
|
||||
flush_tlb_page(vma, vmaddr);
|
||||
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -87,6 +87,9 @@ typedef struct {
|
|||
/* Number of bits in the mm_cpumask */
|
||||
atomic_t active_cpus;
|
||||
|
||||
/* Number of users of the external (Nest) MMU */
|
||||
atomic_t copros;
|
||||
|
||||
/* NPU NMMU context */
|
||||
struct npu_context *npu_context;
|
||||
|
||||
|
|
|
@ -47,9 +47,6 @@ extern void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmad
|
|||
#endif
|
||||
extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
|
||||
extern void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr);
|
||||
extern void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
|
||||
unsigned long page_size);
|
||||
extern void radix__flush_tlb_lpid(unsigned long lpid);
|
||||
extern void radix__flush_tlb_all(void);
|
||||
extern void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
|
||||
unsigned long address);
|
||||
|
|
|
@ -203,6 +203,7 @@ static inline void cpu_feature_keys_init(void) { }
|
|||
#define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000)
|
||||
#define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000)
|
||||
#define CPU_FTR_PMAO_BUG LONG_ASM_CONST(0x1000000000000000)
|
||||
#define CPU_FTR_P9_TLBIE_BUG LONG_ASM_CONST(0x2000000000000000)
|
||||
#define CPU_FTR_POWER9_DD1 LONG_ASM_CONST(0x4000000000000000)
|
||||
#define CPU_FTR_POWER9_DD2_1 LONG_ASM_CONST(0x8000000000000000)
|
||||
|
||||
|
@ -465,7 +466,7 @@ static inline void cpu_feature_keys_init(void) { }
|
|||
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
|
||||
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
|
||||
CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | \
|
||||
CPU_FTR_PKEY)
|
||||
CPU_FTR_PKEY | CPU_FTR_P9_TLBIE_BUG)
|
||||
#define CPU_FTRS_POWER9_DD1 ((CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD1) & \
|
||||
(~CPU_FTR_SAO))
|
||||
#define CPU_FTRS_POWER9_DD2_0 CPU_FTRS_POWER9
|
||||
|
|
|
@@ -92,15 +92,23 @@ static inline void dec_mm_active_cpus(struct mm_struct *mm)
static inline void mm_context_add_copro(struct mm_struct *mm)
{
/*
* On hash, should only be called once over the lifetime of
* the context, as we can't decrement the active cpus count
* and flush properly for the time being.
* If any copro is in use, increment the active CPU count
* in order to force TLB invalidations to be global as to
* propagate to the Nest MMU.
*/
inc_mm_active_cpus(mm);
if (atomic_inc_return(&mm->context.copros) == 1)
inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
int c;

c = atomic_dec_if_positive(&mm->context.copros);

/* Detect imbalance between add and remove */
WARN_ON(c < 0);

/*
* Need to broadcast a global flush of the full mm before
* decrementing active_cpus count, as the next TLBI may be

@@ -111,7 +119,7 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
* for the time being. Invalidations will remain global if
* used on hash.
*/
if (radix_enabled()) {
if (c == 0 && radix_enabled()) {
flush_all_mm(mm);
dec_mm_active_cpus(mm);
}
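For readers following the copro hunk above: the pattern is a plain "first user pins, last user flushes and unpins" reference count. A minimal userspace sketch of that pattern with C11 atomics is below; the type and helper names are illustrative stand-ins (the kernel uses atomic_inc_return()/atomic_dec_if_positive() on mm->context), not the kernel's API.

```c
#include <stdatomic.h>
#include <stdio.h>

struct mm_like {
	atomic_int active_cpus;   /* while > 0, TLB invalidations go global */
	atomic_int copros;        /* number of coprocessor (Nest MMU) users */
};

static void flush_all(struct mm_like *mm) { (void)mm; /* stand-in for flush_all_mm() */ }

static void add_copro(struct mm_like *mm)
{
	/* only the first copro user needs to bump active_cpus */
	if (atomic_fetch_add(&mm->copros, 1) == 0)
		atomic_fetch_add(&mm->active_cpus, 1);
}

static void remove_copro(struct mm_like *mm)
{
	/* simplified: the kernel uses atomic_dec_if_positive() here */
	int c = atomic_fetch_sub(&mm->copros, 1) - 1;

	if (c < 0) {
		fprintf(stderr, "imbalanced remove\n");
		return;
	}
	if (c == 0) {
		/* flush before dropping the "global" hint, as in the diff */
		flush_all(mm);
		atomic_fetch_sub(&mm->active_cpus, 1);
	}
}

int main(void)
{
	struct mm_like mm = { .active_cpus = 0, .copros = 0 };

	add_copro(&mm);
	add_copro(&mm);
	remove_copro(&mm);
	remove_copro(&mm);
	printf("active_cpus=%d copros=%d\n",
	       atomic_load(&mm.active_cpus), atomic_load(&mm.copros));
	return 0;
}
```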
@@ -709,6 +709,9 @@ static __init void cpufeatures_cpu_quirks(void)
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
else if ((version & 0xffffefff) == 0x004e0201)
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;

if ((version & 0xffff0000) == 0x004e0000)
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
}

static void __init cpufeatures_setup_finished(void)

@@ -720,6 +723,9 @@ static void __init cpufeatures_setup_finished(void)
cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
}

/* Make sure powerpc_base_platform is non-NULL */
powerpc_base_platform = cur_cpu_spec->platform;

system_registers.lpcr = mfspr(SPRN_LPCR);
system_registers.hfscr = mfspr(SPRN_HFSCR);
system_registers.fscr = mfspr(SPRN_FSCR);
@@ -706,7 +706,7 @@ EXC_COMMON_BEGIN(bad_addr_slb)
ld r3, PACA_EXSLB+EX_DAR(r13)
std r3, _DAR(r1)
beq cr6, 2f
li r10, 0x480 /* fix trap number for I-SLB miss */
li r10, 0x481 /* fix trap number for I-SLB miss */
std r10, _TRAP(r1)
2: bl save_nvgprs
addi r3, r1, STACK_FRAME_OVERHEAD
@@ -476,6 +476,14 @@ void force_external_irq_replay(void)
*/
WARN_ON(!arch_irqs_disabled());

/*
* Interrupts must always be hard disabled before irq_happened is
* modified (to prevent lost update in case of interrupt between
* load and store).
*/
__hard_irq_disable();
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

/* Indicate in the PACA that we have an interrupt to replay */
local_paca->irq_happened |= PACA_IRQ_EE;
}
@@ -157,6 +157,9 @@ static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
asm volatile("ptesync": : :"memory");
asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
: : "r" (addr), "r" (kvm->arch.lpid) : "memory");
if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG))
asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
: : "r" (addr), "r" (kvm->arch.lpid) : "memory");
asm volatile("ptesync": : :"memory");
}
@@ -473,6 +473,17 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
kvm->arch.lpid, 0, 0, 0);
}

if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
/*
* Need the extra ptesync to make sure we don't
* re-order the tlbie
*/
asm volatile("ptesync": : :"memory");
asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
"r" (rbvalues[0]), "r" (kvm->arch.lpid));
}

asm volatile("eieio; tlbsync; ptesync" : : : "memory");
kvm->arch.tlbie_lock = 0;
} else {
@@ -320,7 +320,6 @@ kvm_novcpu_exit:
stw r12, STACK_SLOT_TRAP(r1)
bl kvmhv_commence_exit
nop
lwz r12, STACK_SLOT_TRAP(r1)
b kvmhv_switch_to_host

/*

@@ -1220,6 +1219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

secondary_too_late:
li r12, 0
stw r12, STACK_SLOT_TRAP(r1)
cmpdi r4, 0
beq 11f
stw r12, VCPU_TRAP(r4)

@@ -1558,12 +1558,12 @@ mc_cont:
3: stw r5,VCPU_SLB_MAX(r9)

guest_bypass:
stw r12, STACK_SLOT_TRAP(r1)
mr r3, r12
/* Increment exit count, poke other threads to exit */
bl kvmhv_commence_exit
nop
ld r9, HSTATE_KVM_VCPU(r13)
lwz r12, VCPU_TRAP(r9)

/* Stop others sending VCPU interrupts to this physical CPU */
li r0, -1

@@ -1898,6 +1898,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
* have to coordinate the hardware threads.
* Here STACK_SLOT_TRAP(r1) contains the trap number.
*/
kvmhv_switch_to_host:
/* Secondary threads wait for primary to do partition switch */

@@ -1950,12 +1951,12 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

/* If HMI, call kvmppc_realmode_hmi_handler() */
lwz r12, STACK_SLOT_TRAP(r1)
cmpwi r12, BOOK3S_INTERRUPT_HMI
bne 27f
bl kvmppc_realmode_hmi_handler
nop
cmpdi r3, 0
li r12, BOOK3S_INTERRUPT_HMI
/*
* At this point kvmppc_realmode_hmi_handler may have resync-ed
* the TB, and if it has, we must not subtract the guest timebase

@@ -2008,10 +2009,8 @@ BEGIN_FTR_SECTION
lwz r8, KVM_SPLIT_DO_RESTORE(r3)
cmpwi r8, 0
beq 47f
stw r12, STACK_SLOT_TRAP(r1)
bl kvmhv_p9_restore_lpcr
nop
lwz r12, STACK_SLOT_TRAP(r1)
b 48f
47:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

@@ -2049,6 +2048,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)

lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
ld r0, SFS+PPC_LR_STKOFF(r1)
addi r1, r1, SFS
mtlr r0
@@ -201,6 +201,15 @@ static inline unsigned long ___tlbie(unsigned long vpn, int psize,
return va;
}

static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
/* Need the extra ptesync to ensure we don't reorder tlbie*/
asm volatile("ptesync": : :"memory");
___tlbie(vpn, psize, apsize, ssize);
}
}

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
unsigned long rb;

@@ -278,6 +287,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
asm volatile("ptesync": : :"memory");
} else {
__tlbie(vpn, psize, apsize, ssize);
fixup_tlbie(vpn, psize, apsize, ssize);
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)

@@ -771,7 +781,7 @@ static void native_hpte_clear(void)
*/
static void native_flush_hash_range(unsigned long number, int local)
{
unsigned long vpn;
unsigned long vpn = 0;
unsigned long hash, index, hidx, shift, slot;
struct hash_pte *hptep;
unsigned long hpte_v;

@@ -843,6 +853,10 @@ static void native_flush_hash_range(unsigned long number, int local)
__tlbie(vpn, psize, psize, ssize);
} pte_iterate_hashed_end();
}
/*
* Just do one more with the last used values.
*/
fixup_tlbie(vpn, psize, psize, ssize);
asm volatile("eieio; tlbsync; ptesync":::"memory");

if (lock_tlbie)
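The hunks above all apply the same workaround shape: after the real invalidations, an affected CPU gets one extra ptesync plus one harmless re-issued tlbie before the closing barrier. The sketch below only illustrates that control flow with stub functions; it is not the kernel's implementation and the real sequences are the inline asm shown in the diff.

```c
#include <stdbool.h>

static bool cpu_has_tlbie_bug;          /* would come from CPU feature bits */

static void ptesync(void) { }           /* stand-in for asm volatile("ptesync") */
static void tlbie(unsigned long va) { (void)va; }  /* stand-in for PPC_TLBIE_5 */
static void tlb_barrier(void) { }       /* stand-in for eieio; tlbsync; ptesync */

static void fixup_tlbie(unsigned long va)
{
	if (cpu_has_tlbie_bug) {
		ptesync();   /* keep the extra tlbie ordered after the real ones */
		tlbie(va);   /* one more invalidate; any legal operand will do */
	}
}

static void flush_one(unsigned long va)
{
	ptesync();
	tlbie(va);
	fixup_tlbie(va); /* inserted just before the final barrier, as in the diff */
	tlb_barrier();
}

int main(void)
{
	cpu_has_tlbie_bug = true;
	flush_one(0x1000);
	return 0;
}
```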
@@ -173,6 +173,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
mm_iommu_init(mm);
#endif
atomic_set(&mm->context.active_cpus, 0);
atomic_set(&mm->context.copros, 0);

return 0;
}
@@ -481,6 +481,7 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
"r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
}
/* do we need fixup here ?*/
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
@@ -119,6 +119,49 @@ static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va(unsigned long va, unsigned long pid,
unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;

rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */

asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;

rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */

asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void fixup_tlbie(void)
{
unsigned long pid = 0;
unsigned long va = ((1UL << 52) - 1);

if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
asm volatile("ptesync": : :"memory");
__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
}
}

/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/

@@ -151,26 +194,27 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
asm volatile("ptesync": : :"memory");
__tlbie_pid(pid, ric);

/*
* Workaround the fact that the "ric" argument to __tlbie_pid
* must be a compile-time contraint to match the "i" constraint
* in the asm statement.
*/
switch (ric) {
case RIC_FLUSH_TLB:
__tlbie_pid(pid, RIC_FLUSH_TLB);
break;
case RIC_FLUSH_PWC:
__tlbie_pid(pid, RIC_FLUSH_PWC);
break;
case RIC_FLUSH_ALL:
default:
__tlbie_pid(pid, RIC_FLUSH_ALL);
}
fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void __tlbiel_va(unsigned long va, unsigned long pid,
unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;

rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */

asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
unsigned long pid, unsigned long page_size,
unsigned long psize)

@@ -203,22 +247,6 @@ static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
unsigned long ap, unsigned long ric)
{
unsigned long rb,rs,prs,r;

rb = va & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58);
rs = pid << PPC_BITLSHIFT(31);
prs = 1; /* process scoped */
r = 1; /* raidx format */

asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
unsigned long pid, unsigned long page_size,
unsigned long psize)

@@ -237,6 +265,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid,

asm volatile("ptesync": : :"memory");
__tlbie_va(va, pid, ap, ric);
fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

@@ -248,6 +277,7 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
if (also_pwc)
__tlbie_pid(pid, RIC_FLUSH_PWC);
__tlbie_va_range(start, end, pid, page_size, psize);
fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

@@ -311,6 +341,16 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

static bool mm_needs_flush_escalation(struct mm_struct *mm)
{
/*
* P9 nest MMU has issues with the page walk cache
* caching PTEs and not flushing them properly when
* RIC = 0 for a PID/LPID invalidate
*/
return atomic_read(&mm->context.copros) != 0;
}

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{

@@ -321,9 +361,12 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
return;

preempt_disable();
if (!mm_is_thread_local(mm))
_tlbie_pid(pid, RIC_FLUSH_TLB);
else
if (!mm_is_thread_local(mm)) {
if (mm_needs_flush_escalation(mm))
_tlbie_pid(pid, RIC_FLUSH_ALL);
else
_tlbie_pid(pid, RIC_FLUSH_TLB);
} else
_tlbiel_pid(pid, RIC_FLUSH_TLB);
preempt_enable();
}

@@ -435,10 +478,14 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}

if (full) {
if (local)
if (local) {
_tlbiel_pid(pid, RIC_FLUSH_TLB);
else
_tlbie_pid(pid, RIC_FLUSH_TLB);
} else {
if (mm_needs_flush_escalation(mm))
_tlbie_pid(pid, RIC_FLUSH_ALL);
else
_tlbie_pid(pid, RIC_FLUSH_TLB);
}
} else {
bool hflush = false;
unsigned long hstart, hend;

@@ -465,6 +512,7 @@ void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
if (hflush)
__tlbie_va_range(hstart, hend, pid,
HPAGE_PMD_SIZE, MMU_PAGE_2M);
fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
}

@@ -548,6 +596,9 @@ static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
}

if (full) {
if (!local && mm_needs_flush_escalation(mm))
also_pwc = true;

if (local)
_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
else

@@ -603,46 +654,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
unsigned long page_size)
{
unsigned long rb,rs,prs,r;
unsigned long ap;
unsigned long ric = RIC_FLUSH_TLB;

ap = mmu_get_ap(radix_get_mmu_psize(page_size));
rb = gpa & ~(PPC_BITMASK(52, 63));
rb |= ap << PPC_BITLSHIFT(58);
rs = lpid & ((1UL << 32) - 1);
prs = 0; /* process scoped */
r = 1; /* raidx format */

asm volatile("ptesync": : :"memory");
asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
unsigned long rb,rs,prs,r;
unsigned long ric = RIC_FLUSH_ALL;

rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
rs = lpid & ((1UL << 32) - 1);
prs = 0; /* partition scoped */
r = 1; /* raidx format */

asm volatile("ptesync": : :"memory");
asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("eieio; tlbsync; ptesync": : :"memory");
trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
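The mm_needs_flush_escalation() comment above carries the key reasoning: when a Nest MMU user is attached, a PID-wide RIC=0 invalidate is not enough, so the flush is widened to "everything". A tiny illustrative sketch of that decision, with stand-in types rather than the kernel's, follows.

```c
/* Enum order mirrors the kernel's RIC_* meanings (TLB=0, PWC=1, ALL=2). */
enum ric { RIC_FLUSH_TLB, RIC_FLUSH_PWC, RIC_FLUSH_ALL };

struct mm_ctx { int copros; };   /* stand-in for mm->context.copros */

static int mm_needs_flush_escalation(const struct mm_ctx *ctx)
{
	/* any coprocessor user means the page-walk cache must go too */
	return ctx->copros != 0;
}

static enum ric pick_ric(const struct mm_ctx *ctx)
{
	return mm_needs_flush_escalation(ctx) ? RIC_FLUSH_ALL : RIC_FLUSH_TLB;
}

int main(void)
{
	struct mm_ctx plain = { .copros = 0 }, with_copro = { .copros = 1 };

	return (pick_ric(&plain) == RIC_FLUSH_TLB &&
		pick_ric(&with_copro) == RIC_FLUSH_ALL) ? 0 : 1;
}
```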
@@ -163,13 +163,10 @@ static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
pte_unmap(pte);
}

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
pmd_t orig, pmd_t pmd)
{
pmd_t orig = *pmdp;

*pmdp = pmd;

if (mm == &init_mm)
return;

@@ -219,6 +216,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
}
}

void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
pmd_t orig = *pmdp;

*pmdp = pmd;
__set_pmd_acct(mm, addr, orig, pmd);
}

static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{

@@ -227,6 +233,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
do {
old = *pmdp;
} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
__set_pmd_acct(vma->vm_mm, address, old, pmd);

return old;
}
@@ -315,19 +315,6 @@ config X86_L1_CACHE_SHIFT
default "4" if MELAN || M486 || MGEODEGX1
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX

config X86_PPRO_FENCE
bool "PentiumPro memory ordering errata workaround"
depends on M686 || M586MMX || M586TSC || M586 || M486 || MGEODEGX1
---help---
Old PentiumPro multiprocessor systems had errata that could cause
memory operations to violate the x86 ordering standard in rare cases.
Enabling this option will attempt to work around some (but not all)
occurrences of this problem, at the cost of much heavier spinlock and
memory barrier operations.

If unsure, say n here. Even distro kernels should think twice before
enabling this: there are few systems, and an unlikely bug.

config X86_F00F_BUG
def_bool y
depends on M586MMX || M586TSC || M586 || M486
@@ -223,6 +223,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr)

LDFLAGS := -m elf_$(UTS_MACHINE)

#
# The 64-bit kernel must be aligned to 2MB. Pass -z max-page-size=0x200000 to
# the linker to force 2MB page size regardless of the default page size used
# by the linker.
#
ifdef CONFIG_X86_64
LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
endif

# Speed up the build
KBUILD_CFLAGS += -pipe
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
@@ -309,6 +309,10 @@ static void parse_elf(void *output)

switch (phdr->p_type) {
case PT_LOAD:
#ifdef CONFIG_X86_64
if ((phdr->p_align % 0x200000) != 0)
error("Alignment of LOAD segment isn't multiple of 2MB");
#endif
#ifdef CONFIG_RELOCATABLE
dest = output;
dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
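The Makefile and decompressor hunks above go together: the 64-bit kernel is now linked with a 2 MiB max page size, and the decompressor rejects PT_LOAD segments whose p_align is not a 2 MiB multiple. A small standalone sketch of that validation logic (not the kernel's error() path) is below.

```c
#include <stdint.h>
#include <stdio.h>

#define KERNEL_ALIGN 0x200000u   /* 2 MiB, matches -z max-page-size=0x200000 */

static int check_load_segment(uint64_t p_align)
{
	if (p_align % KERNEL_ALIGN != 0) {
		fprintf(stderr, "Alignment of LOAD segment isn't multiple of 2MB\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	/* 2 MiB-aligned segment passes, a 4 KiB-aligned one is rejected */
	return (check_load_segment(0x200000) == 0 &&
		check_load_segment(0x1000) == -1) ? 0 : 1;
}
```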
@@ -1138,7 +1138,7 @@ apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \
#endif /* CONFIG_HYPERV */

idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
idtentry int3 do_int3 has_error_code=0
idtentry stack_segment do_stack_segment has_error_code=1

#ifdef CONFIG_XEN
@@ -5,8 +5,6 @@
#undef CONFIG_OPTIMIZE_INLINING
#endif

#undef CONFIG_X86_PPRO_FENCE

#ifdef CONFIG_X86_64

/*
@@ -347,7 +347,7 @@ void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
p4d = p4d_offset(pgd, VSYSCALL_ADDR);
#if CONFIG_PGTABLE_LEVELS >= 5
p4d->p4d |= _PAGE_USER;
set_p4d(p4d, __p4d(p4d_val(*p4d) | _PAGE_USER));
#endif
pud = pud_offset(p4d, VSYSCALL_ADDR);
set_pud(pud, __pud(pud_val(*pud) | _PAGE_USER));
@@ -2118,7 +2118,8 @@ static int x86_pmu_event_init(struct perf_event *event)
event->destroy(event);
}

if (READ_ONCE(x86_pmu.attr_rdpmc))
if (READ_ONCE(x86_pmu.attr_rdpmc) &&
!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;

return err;
@@ -2952,9 +2952,9 @@ static void intel_pebs_aliases_skl(struct perf_event *event)
return intel_pebs_aliases_precdist(event);
}

static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
{
unsigned long flags = x86_pmu.free_running_flags;
unsigned long flags = x86_pmu.large_pebs_flags;

if (event->attr.use_clockid)
flags &= ~PERF_SAMPLE_TIME;

@@ -2976,8 +2976,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
if (!event->attr.freq) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
if (!(event->attr.sample_type &
~intel_pmu_free_running_flags(event)))
event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
~intel_pmu_large_pebs_flags(event)))
event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
}
if (x86_pmu.pebs_aliases)
x86_pmu.pebs_aliases(event);

@@ -3194,7 +3194,7 @@ static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
X86_CONFIG(.event=0xc0, .umask=0x01)) {
if (left < 128)
left = 128;
left &= ~0x3fu;
left &= ~0x3fULL;
}
return left;
}

@@ -3460,7 +3460,7 @@ static __initconst const struct x86_pmu core_pmu = {
.event_map = intel_pmu_event_map,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
.free_running_flags = PEBS_FREERUNNING_FLAGS,
.large_pebs_flags = LARGE_PEBS_FLAGS,

/*
* Intel PMCs cannot be accessed sanely above 32-bit width,

@@ -3502,7 +3502,7 @@ static __initconst const struct x86_pmu intel_pmu = {
.event_map = intel_pmu_event_map,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
.apic = 1,
.free_running_flags = PEBS_FREERUNNING_FLAGS,
.large_pebs_flags = LARGE_PEBS_FLAGS,
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of
@@ -935,7 +935,7 @@ void intel_pmu_pebs_add(struct perf_event *event)
bool needed_cb = pebs_needs_sched_cb(cpuc);

cpuc->n_pebs++;
if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs++;

pebs_update_state(needed_cb, cpuc, event->ctx->pmu);

@@ -975,7 +975,7 @@ void intel_pmu_pebs_del(struct perf_event *event)
bool needed_cb = pebs_needs_sched_cb(cpuc);

cpuc->n_pebs--;
if (hwc->flags & PERF_X86_EVENT_FREERUNNING)
if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
cpuc->n_large_pebs--;

pebs_update_state(needed_cb, cpuc, event->ctx->pmu);

@@ -1530,7 +1530,7 @@ void __init intel_ds_init(void)
x86_pmu.pebs_record_size =
sizeof(struct pebs_record_skl);
x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
x86_pmu.free_running_flags |= PERF_SAMPLE_TIME;
x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
break;

default:
@@ -3343,6 +3343,7 @@ static struct extra_reg skx_uncore_cha_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4),
SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3),
EVENT_EXTRA_END
};

@@ -3562,24 +3563,27 @@ static struct intel_uncore_type *skx_msr_uncores[] = {
NULL,
};

/*
* To determine the number of CHAs, it should read bits 27:0 in the CAPID6
* register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083.
*/
#define SKX_CAPID6 0x9c
#define SKX_CHA_BIT_MASK GENMASK(27, 0)

static int skx_count_chabox(void)
{
struct pci_dev *chabox_dev = NULL;
int bus, count = 0;
struct pci_dev *dev = NULL;
u32 val = 0;

while (1) {
chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev);
if (!chabox_dev)
break;
if (count == 0)
bus = chabox_dev->bus->number;
if (bus != chabox_dev->bus->number)
break;
count++;
}
dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev);
if (!dev)
goto out;

pci_dev_put(chabox_dev);
return count;
pci_read_config_dword(dev, SKX_CAPID6, &val);
val &= SKX_CHA_BIT_MASK;
out:
pci_dev_put(dev);
return hweight32(val);
}

void skx_uncore_cpu_init(void)
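The new skx_count_chabox() above replaces device enumeration with a single register read: mask CAPID6 to bits 27:0 and popcount the result. A minimal userspace sketch of that arithmetic is below; the register value is passed in as a parameter here, whereas the kernel reads it with pci_read_config_dword() from device 30, function 3, offset 0x9c.

```c
#include <stdint.h>
#include <stdio.h>

#define SKX_CHA_BIT_MASK ((1u << 28) - 1)   /* GENMASK(27, 0) */

static unsigned int count_chabox(uint32_t capid6)
{
	/* each set bit in CAPID6[27:0] corresponds to one CHA box */
	return (unsigned int)__builtin_popcount(capid6 & SKX_CHA_BIT_MASK);
}

int main(void)
{
	printf("%u CHAs\n", count_chabox(0x0fffffff));  /* 28 bits set -> 28 */
	return 0;
}
```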
@@ -69,7 +69,7 @@ struct event_constraint {
#define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */
#define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */
#define PERF_X86_EVENT_AUTO_RELOAD 0x0400 /* use PEBS auto-reload */
#define PERF_X86_EVENT_FREERUNNING 0x0800 /* use freerunning PEBS */
#define PERF_X86_EVENT_LARGE_PEBS 0x0800 /* use large PEBS */

struct amd_nb {

@@ -88,7 +88,7 @@ struct amd_nb {
* REGS_USER can be handled for events limited to ring 3.
*
*/
#define PEBS_FREERUNNING_FLAGS \
#define LARGE_PEBS_FLAGS \
(PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \

@@ -608,7 +608,7 @@ struct x86_pmu {
struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event);
int max_pebs_events;
unsigned long free_running_flags;
unsigned long large_pebs_flags;

/*
* Intel LBR
@@ -52,11 +52,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#define barrier_nospec() alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC, \
"lfence", X86_FEATURE_LFENCE_RDTSC)

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else
#define dma_rmb() barrier()
#endif
#define dma_wmb() barrier()

#ifdef CONFIG_X86_32

@@ -68,30 +64,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#define __smp_wmb() barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#if defined(CONFIG_X86_PPRO_FENCE)

/*
* For this option x86 doesn't have a strong TSO memory
* model and we should fall back to full barriers.
*/

#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
__smp_mb(); \
WRITE_ONCE(*p, v); \
} while (0)

#define __smp_load_acquire(p) \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
__smp_mb(); \
___p1; \
})

#else /* regular x86 TSO memory ordering */

#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \

@@ -107,8 +79,6 @@ do { \
___p1; \
})

#endif

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic() barrier()
#define __smp_mb__after_atomic() barrier()
@@ -316,6 +316,7 @@
#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */

@@ -328,6 +329,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
@@ -232,21 +232,6 @@ extern void set_iounmap_nonlazy(void);
*/
#define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))

/*
* Cache management
*
* This needed for two cases
* 1. Out of order aware processors
* 2. Accidentally out of order processors (PPro errata #51)
*/

static inline void flush_write_buffers(void)
{
#if defined(CONFIG_X86_PPRO_FENCE)
asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}

#endif /* __KERNEL__ */

extern void native_io_delay(void);
@@ -39,6 +39,7 @@ struct device;

enum ucode_state {
UCODE_OK = 0,
UCODE_NEW,
UCODE_UPDATED,
UCODE_NFOUND,
UCODE_ERROR,
@@ -183,7 +183,10 @@
* otherwise we'll run out of registers. We don't care about CET
* here, anyway.
*/
# define CALL_NOSPEC ALTERNATIVE("call *%[thunk_target]\n", \
# define CALL_NOSPEC \
ALTERNATIVE( \
ANNOTATE_RETPOLINE_SAFE \
"call *%[thunk_target]\n", \
" jmp 904f;\n" \
" .align 16\n" \
"901: call 903f;\n" \
@@ -352,6 +352,7 @@ enum vmcs_field {
#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */
#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */

/* GUEST_INTERRUPTIBILITY_INFO flags. */
@@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
/*
* Early microcode releases for the Spectre v2 mitigation were broken.
* Information taken from;
* - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
* - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
* - https://kb.vmware.com/s/article/52345
* - Microcode revisions observed in the wild
* - Release note from 20180108 microcode release

@@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
{ INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
{ INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
{ INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
{ INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
{ INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
{ INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
{ INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
return -EINVAL;

ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
if (ret != UCODE_OK)
if (ret > UCODE_UPDATED)
return -EINVAL;

return 0;

@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
{
struct ucode_patch *p;
enum ucode_state ret;

/* free old equiv table */
free_equiv_cpu_table();

ret = __load_microcode_amd(family, data, size);

if (ret != UCODE_OK)
if (ret != UCODE_OK) {
cleanup();

#ifdef CONFIG_X86_32
/* save BSP's matching patch for early load */
if (save) {
struct ucode_patch *p = find_patch(0);
if (p) {
memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
PATCH_MAX_SIZE));
}
return ret;
}
#endif

p = find_patch(0);
if (!p) {
return ret;
} else {
if (boot_cpu_data.microcode == p->patch_id)
return ret;

ret = UCODE_NEW;
}

/* save BSP's matching patch for early load */
if (!save)
return ret;

memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));

return ret;
}
@@ -517,7 +517,29 @@ static int check_online_cpus(void)
return -EINVAL;
}

static atomic_t late_cpus;
static atomic_t late_cpus_in;
static atomic_t late_cpus_out;

static int __wait_for_cpus(atomic_t *t, long long timeout)
{
int all_cpus = num_online_cpus();

atomic_inc(t);

while (atomic_read(t) < all_cpus) {
if (timeout < SPINUNIT) {
pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
all_cpus - atomic_read(t));
return 1;
}

ndelay(SPINUNIT);
timeout -= SPINUNIT;

touch_nmi_watchdog();
}
return 0;
}

/*
* Returns:

@@ -527,30 +549,16 @@ static atomic_t late_cpus;
*/
static int __reload_late(void *info)
{
unsigned int timeout = NSEC_PER_SEC;
int all_cpus = num_online_cpus();
int cpu = smp_processor_id();
enum ucode_state err;
int ret = 0;

atomic_dec(&late_cpus);

/*
* Wait for all CPUs to arrive. A load will not be attempted unless all
* CPUs show up.
* */
while (atomic_read(&late_cpus)) {
if (timeout < SPINUNIT) {
pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
atomic_read(&late_cpus));
return -1;
}

ndelay(SPINUNIT);
timeout -= SPINUNIT;

touch_nmi_watchdog();
}
if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
return -1;

spin_lock(&update_lock);
apply_microcode_local(&err);

@@ -558,15 +566,22 @@ static int __reload_late(void *info)

if (err > UCODE_NFOUND) {
pr_warn("Error reloading microcode on CPU %d\n", cpu);
ret = -1;
} else if (err == UCODE_UPDATED) {
return -1;
/* siblings return UCODE_OK because their engine got updated already */
} else if (err == UCODE_UPDATED || err == UCODE_OK) {
ret = 1;
} else {
return ret;
}

atomic_inc(&late_cpus);

while (atomic_read(&late_cpus) != all_cpus)
cpu_relax();
/*
* Increase the wait timeout to a safe value here since we're
* serializing the microcode update and that could take a while on a
* large number of CPUs. And that is fine as the *actual* timeout will
* be determined by the last CPU finished updating and thus cut short.
*/
if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
panic("Timeout during microcode update!\n");

return ret;
}

@@ -579,12 +594,11 @@ static int microcode_reload_late(void)
{
int ret;

atomic_set(&late_cpus, num_online_cpus());
atomic_set(&late_cpus_in, 0);
atomic_set(&late_cpus_out, 0);

ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
if (ret < 0)
return ret;
else if (ret > 0)
if (ret > 0)
microcode_check();

return ret;

@@ -607,7 +621,7 @@ static ssize_t reload_store(struct device *dev,
return size;

tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
if (tmp_ret != UCODE_OK)
if (tmp_ret != UCODE_NEW)
return size;

get_online_cpus();

@@ -691,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
if (system_state != SYSTEM_RUNNING)
return UCODE_NFOUND;

ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
refresh_fw);

if (ustate == UCODE_OK) {
ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
if (ustate == UCODE_NEW) {
pr_debug("CPU%d updated upon init\n", cpu);
apply_microcode_on_target(cpu);
}
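The __wait_for_cpus() hunks above replace a single countdown with a two-phase rendezvous: no CPU applies the update until everyone has arrived, and no CPU leaves until everyone has finished. The following sketch models only that barrier structure with C11 threads and atomics; the timeout, NMI-watchdog handling, and stop_machine() context from the diff are deliberately omitted, and the names are illustrative.

```c
#include <stdatomic.h>
#include <threads.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int cpus_in, cpus_out;

static void wait_for_all(atomic_int *t)
{
	atomic_fetch_add(t, 1);
	while (atomic_load(t) < NCPUS)
		thrd_yield();   /* the kernel ndelay()s and pets the NMI watchdog here */
}

static int reload_one(void *arg)
{
	int cpu = (int)(long)arg;

	wait_for_all(&cpus_in);   /* phase 1: nobody updates until all arrive */
	printf("cpu %d applying update\n", cpu);
	wait_for_all(&cpus_out);  /* phase 2: nobody leaves until all finished */
	return 0;
}

int main(void)
{
	thrd_t th[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		thrd_create(&th[i], reload_one, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		thrd_join(th[i], NULL);
	return 0;
}
```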
@@ -862,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
unsigned int leftover = size;
unsigned int curr_mc_size = 0, new_mc_size = 0;
unsigned int csig, cpf;
enum ucode_state ret = UCODE_OK;

while (leftover) {
struct microcode_header_intel mc_header;

@@ -903,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
new_mc = mc;
new_mc_size = mc_size;
mc = NULL; /* trigger new vmalloc */
ret = UCODE_NEW;
}

ucode_ptr += mc_size;

@@ -932,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);

return UCODE_OK;
return ret;
}

static int get_ucode_fw(void *to, const void *from, size_t n)
@@ -160,7 +160,6 @@ static const __initconst struct idt_data early_pf_idts[] = {
*/
static const __initconst struct idt_data dbg_idts[] = {
INTG(X86_TRAP_DB, debug),
INTG(X86_TRAP_BP, int3),
};
#endif

@@ -183,7 +182,6 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
static const __initconst struct idt_data ist_idts[] = {
ISTG(X86_TRAP_DB, debug, DEBUG_STACK),
ISTG(X86_TRAP_NMI, nmi, NMI_STACK),
SISTG(X86_TRAP_BP, int3, DEBUG_STACK),
ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK),
#ifdef CONFIG_X86_MCE
ISTG(X86_TRAP_MC, &machine_check, MCE_STACK),
@@ -37,7 +37,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
return NOMMU_MAPPING_ERROR;
flush_write_buffers();
return bus;
}

@@ -72,25 +71,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
return 0;
s->dma_length = s->length;
}
flush_write_buffers();
return nents;
}

static void nommu_sync_single_for_device(struct device *dev,
dma_addr_t addr, size_t size,
enum dma_data_direction dir)
{
flush_write_buffers();
}

static void nommu_sync_sg_for_device(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
flush_write_buffers();
}

static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == NOMMU_MAPPING_ERROR;

@@ -101,8 +84,6 @@ const struct dma_map_ops nommu_dma_ops = {
.free = dma_generic_free_coherent,
.map_sg = nommu_map_sg,
.map_page = nommu_map_page,
.sync_single_for_device = nommu_sync_single_for_device,
.sync_sg_for_device = nommu_sync_sg_for_device,
.is_phys = 1,
.mapping_error = nommu_mapping_error,
.dma_supported = x86_dma_supported,
@@ -577,7 +577,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE

@@ -592,6 +591,13 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
if (poke_int3_handler(regs))
return;

/*
* Use ist_enter despite the fact that we don't use an IST stack.
* We can be called from a kprobe in non-CONTEXT_KERNEL kernel
* mode or even during context tracking state changes.
*
* This means that we can't schedule. That's okay.
*/
ist_enter(regs);
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP

@@ -609,15 +615,10 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
SIGTRAP) == NOTIFY_STOP)
goto exit;

/*
* Let others (NMI) know that the debug stack is in use
* as we may switch to the interrupt stack.
*/
debug_stack_usage_inc();
cond_local_irq_enable(regs);
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
cond_local_irq_disable(regs);
debug_stack_usage_dec();

exit:
ist_exit(regs);
}
@@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
return;

check_vip:
if (VEFLAGS & X86_EFLAGS_VIP) {
if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
(X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
save_v86_state(regs, VM86_STI);
return;
}
@@ -2770,8 +2770,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
else
pte_access &= ~ACC_WRITE_MASK;

if (!kvm_is_mmio_pfn(pfn))
spte |= shadow_me_mask;

spte |= (u64)pfn << PAGE_SHIFT;
spte |= shadow_me_mask;

if (pte_access & ACC_WRITE_MASK) {
@@ -1045,6 +1045,13 @@ static inline bool is_machine_check(u32 intr_info)
(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}

/* Undocumented: icebp/int1 */
static inline bool is_icebp(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
== (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
}

static inline bool cpu_has_vmx_msr_bitmap(void)
{
return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;

@@ -6179,7 +6186,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= dr6 | DR6_RTM;
if (!(dr6 & ~DR6_RESERVED)) /* icebp */
if (is_icebp(intr_info))
skip_emulated_instruction(vcpu);

kvm_queue_exception(vcpu, DB_VECTOR);
@@ -330,7 +330,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (!pmd_k)
return -1;

if (pmd_huge(*pmd_k))
if (pmd_large(*pmd_k))
return 0;

pte_k = pte_offset_kernel(pmd_k, address);

@@ -475,7 +475,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
BUG();

if (pud_huge(*pud))
if (pud_large(*pud))
return 0;

pmd = pmd_offset(pud, address);

@@ -486,7 +486,7 @@ static noinline int vmalloc_fault(unsigned long address)
if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
BUG();

if (pmd_huge(*pmd))
if (pmd_large(*pmd))
return 0;

pte_ref = pte_offset_kernel(pmd_ref, address);
@@ -800,17 +800,11 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,

#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order,
struct vmem_altmap *altmap)
static void __meminit free_pagetable(struct page *page, int order)
{
unsigned long magic;
unsigned int nr_pages = 1 << order;

if (altmap) {
vmem_altmap_free(altmap, nr_pages);
return;
}

/* bootmem page has reserved flag */
if (PageReserved(page)) {
__ClearPageReserved(page);

@@ -826,8 +820,16 @@ static void __meminit free_pagetable(struct page *page, int order,
free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
static void __meminit free_hugepage_table(struct page *page,
struct vmem_altmap *altmap)
{
if (altmap)
vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);
else
free_pagetable(page, get_order(PMD_SIZE));
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
pte_t *pte;
int i;

@@ -839,14 +841,13 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd,
}

/* free a pte talbe */
free_pagetable(pmd_page(*pmd), 0, altmap);
free_pagetable(pmd_page(*pmd), 0);
spin_lock(&init_mm.page_table_lock);
pmd_clear(pmd);
spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
struct vmem_altmap *altmap)
static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
pmd_t *pmd;
int i;

@@ -858,14 +859,13 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud,
}

/* free a pmd talbe */
free_pagetable(pud_page(*pud), 0, altmap);
free_pagetable(pud_page(*pud), 0);
spin_lock(&init_mm.page_table_lock);
pud_clear(pud);
spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
struct vmem_altmap *altmap)
static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
pud_t *pud;
int i;

@@ -877,7 +877,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,
}

/* free a pud talbe */
free_pagetable(p4d_page(*p4d), 0, altmap);
free_pagetable(p4d_page(*p4d), 0);
spin_lock(&init_mm.page_table_lock);
p4d_clear(p4d);
spin_unlock(&init_mm.page_table_lock);

@@ -885,7 +885,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d,

static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
struct vmem_altmap *altmap, bool direct)
bool direct)
{
unsigned long next, pages = 0;
pte_t *pte;

@@ -916,7 +916,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
* freed when offlining, or simplely not in use.
*/
if (!direct)
free_pagetable(pte_page(*pte), 0, altmap);
free_pagetable(pte_page(*pte), 0);

spin_lock(&init_mm.page_table_lock);
pte_clear(&init_mm, addr, pte);

@@ -939,7 +939,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,

page_addr = page_address(pte_page(*pte));
if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
free_pagetable(pte_page(*pte), 0, altmap);
free_pagetable(pte_page(*pte), 0);

spin_lock(&init_mm.page_table_lock);
pte_clear(&init_mm, addr, pte);

@@ -974,9 +974,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
if (IS_ALIGNED(addr, PMD_SIZE) &&
IS_ALIGNED(next, PMD_SIZE)) {
if (!direct)
free_pagetable(pmd_page(*pmd),
get_order(PMD_SIZE),
altmap);
free_hugepage_table(pmd_page(*pmd),
altmap);

spin_lock(&init_mm.page_table_lock);
pmd_clear(pmd);

@@ -989,9 +988,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
page_addr = page_address(pmd_page(*pmd));
if (!memchr_inv(page_addr, PAGE_INUSE,
PMD_SIZE)) {
free_pagetable(pmd_page(*pmd),
get_order(PMD_SIZE),
altmap);
free_hugepage_table(pmd_page(*pmd),
altmap);

spin_lock(&init_mm.page_table_lock);
pmd_clear(pmd);

@@ -1003,8 +1001,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
}

pte_base = (pte_t *)pmd_page_vaddr(*pmd);
remove_pte_table(pte_base, addr, next, altmap, direct);
free_pte_table(pte_base, pmd, altmap);
remove_pte_table(pte_base, addr, next, direct);
free_pte_table(pte_base, pmd);
}

/* Call free_pmd_table() in remove_pud_table(). */

@@ -1033,8 +1031,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
IS_ALIGNED(next, PUD_SIZE)) {
if (!direct)
free_pagetable(pud_page(*pud),
get_order(PUD_SIZE),
altmap);
get_order(PUD_SIZE));

spin_lock(&init_mm.page_table_lock);
pud_clear(pud);

@@ -1048,8 +1045,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
if (!memchr_inv(page_addr, PAGE_INUSE,
PUD_SIZE)) {
free_pagetable(pud_page(*pud),
get_order(PUD_SIZE),
altmap);
get_order(PUD_SIZE));

spin_lock(&init_mm.page_table_lock);
pud_clear(pud);

@@ -1062,7 +1058,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,

pmd_base = pmd_offset(pud, 0);
remove_pmd_table(pmd_base, addr, next, direct, altmap);
free_pmd_table(pmd_base, pud, altmap);
free_pmd_table(pmd_base, pud);
}

if (direct)

@@ -1094,7 +1090,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end,
* to adapt for boot-time switching between 4 and 5 level page tables.
*/
if (CONFIG_PGTABLE_LEVELS == 5)
free_pud_table(pud_base, p4d, altmap);
free_pud_table(pud_base, p4d);
}

if (direct)
@@ -702,4 +702,52 @@ int pmd_clear_huge(pmd_t *pmd)

return 0;
}

/**
* pud_free_pmd_page - Clear pud entry and free pmd page.
* @pud: Pointer to a PUD.
*
* Context: The pud range has been unmaped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*/
int pud_free_pmd_page(pud_t *pud)
{
pmd_t *pmd;
int i;

if (pud_none(*pud))
return 1;

pmd = (pmd_t *)pud_page_vaddr(*pud);

for (i = 0; i < PTRS_PER_PMD; i++)
if (!pmd_free_pte_page(&pmd[i]))
return 0;

pud_clear(pud);
free_page((unsigned long)pmd);

return 1;
}

/**
* pmd_free_pte_page - Clear pmd entry and free pte page.
* @pmd: Pointer to a PMD.
*
* Context: The pmd range has been unmaped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*/
int pmd_free_pte_page(pmd_t *pmd)
{
pte_t *pte;

if (pmd_none(*pmd))
return 1;

pte = (pte_t *)pmd_page_vaddr(*pmd);
pmd_clear(pmd);
free_page((unsigned long)pte);

return 1;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
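The two new helpers above share one teardown order: free whatever the entry points to, clear the entry, then release the page that held the lower-level table. The sketch below mimics only that ordering with toy types and malloc/free; it is not the kernel's page-table code.

```c
#include <stdlib.h>

#define PTRS_PER_PMD 512

typedef struct { void *pte_page; }  pmd_t;   /* leaf table pointer, or NULL */
typedef struct { pmd_t *pmd_table; } pud_t;  /* points at PTRS_PER_PMD pmds */

static int pmd_free_pte_page(pmd_t *pmd)
{
	if (!pmd->pte_page)
		return 1;                /* nothing mapped: trivially successful */
	free(pmd->pte_page);             /* free_page() in the kernel */
	pmd->pte_page = NULL;            /* pmd_clear() */
	return 1;
}

static int pud_free_pmd_page(pud_t *pud)
{
	if (!pud->pmd_table)
		return 1;
	for (int i = 0; i < PTRS_PER_PMD; i++)
		if (!pmd_free_pte_page(&pud->pmd_table[i]))
			return 0;        /* give up; caller keeps the old mapping */
	free(pud->pmd_table);
	pud->pmd_table = NULL;           /* pud_clear() */
	return 1;
}

int main(void)
{
	pud_t pud = { .pmd_table = calloc(PTRS_PER_PMD, sizeof(pmd_t)) };

	pud.pmd_table[0].pte_page = malloc(4096);   /* one populated pmd */
	return pud_free_pmd_page(&pud) ? 0 : 1;
}
```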
@@ -1188,7 +1188,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
* may converge on the last pass. In such case do one more
* pass to emit the final image
*/
for (pass = 0; pass < 10 || image; pass++) {
for (pass = 0; pass < 20 || image; pass++) {
proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
if (proglen <= 0) {
image = NULL;

@@ -1215,6 +1215,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
}
oldproglen = proglen;
cond_resched();
}

if (bpf_jit_enable > 1)
@@ -227,7 +227,7 @@ int __init efi_alloc_page_tables(void)
if (!pud) {
if (CONFIG_PGTABLE_LEVELS > 4)
free_page((unsigned long) pgd_page_vaddr(*pgd));
free_page((unsigned long)efi_pgd);
free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
return -ENOMEM;
}
@@ -30,11 +30,7 @@

#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_PPRO_FENCE
#define dma_rmb() rmb()
#else /* CONFIG_X86_PPRO_FENCE */
#define dma_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
#define dma_wmb() barrier()

#include <asm-generic/barrier.h>
@@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void)
res.start = gas->address;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
res.flags = IORESOURCE_MEM;
res.end = res.start + ALIGN(gas->access_width, 4);
res.end = res.start + ALIGN(gas->access_width, 4) - 1;
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
res.flags = IORESOURCE_IO;
res.end = res.start + gas->access_width;
res.end = res.start + gas->access_width - 1;
} else {
pr_warn("Unsupported address space: %u\n",
gas->space_id);
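The hunk above is a plain off-by-one fix: struct resource ranges are inclusive on both ends, so a window of `width` bytes starting at `start` must end at `start + width - 1`. A tiny sketch of that arithmetic, ignoring the memory-space ALIGN() detail, follows.

```c
#include <stdint.h>
#include <assert.h>

struct res { uint64_t start, end; };   /* [start, end], both inclusive */

static struct res gas_to_res(uint64_t address, uint64_t access_width)
{
	struct res r = { .start = address, .end = address + access_width - 1 };
	return r;
}

int main(void)
{
	struct res r = gas_to_res(0x1000, 4);

	assert(r.end - r.start + 1 == 4);  /* 4-byte window: 0x1000..0x1003 */
	return 0;
}
```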
@@ -70,7 +70,6 @@ static async_cookie_t async_cookie;
static bool battery_driver_registered;
static int battery_bix_broken_package;
static int battery_notification_delay_ms;
-static int battery_full_discharging;
static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -215,12 +214,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
		return -ENODEV;
	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
-		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
-			if (battery_full_discharging && battery->rate_now == 0)
-				val->intval = POWER_SUPPLY_STATUS_FULL;
-			else
-				val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
-		} else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
+		if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
+			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+		else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
			val->intval = POWER_SUPPLY_STATUS_CHARGING;
		else if (acpi_battery_is_charged(battery))
			val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1170,12 +1166,6 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
	return 0;
}

-static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
-{
-	battery_full_discharging = 1;
-	return 0;
-}
-
static const struct dmi_system_id bat_dmi_table[] __initconst = {
	{
		.callback = battery_bix_broken_package_quirk,
@@ -1193,38 +1183,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
		},
	},
-	{
-		.callback = battery_full_discharging_quirk,
-		.ident = "ASUS GL502VSK",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
-		},
-	},
-	{
-		.callback = battery_full_discharging_quirk,
-		.ident = "ASUS UX305LA",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
-		},
-	},
-	{
-		.callback = battery_full_discharging_quirk,
-		.ident = "ASUS UX360UA",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "UX360UA"),
-		},
-	},
-	{
-		.callback = battery_full_discharging_quirk,
-		.ident = "ASUS UX410UAK",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-			DMI_MATCH(DMI_PRODUCT_NAME, "UX410UAK"),
-		},
-	},
	{},
};

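
With the quirk and its DMI entries removed, the status mapping is again a straight translation from the _BST state bits to a power-supply status, with no per-machine special case. A hedged sketch of that decision as a small pure function (the constants and the "charged" test are stand-ins, not the driver's symbols):

#include <stdio.h>

#define STATE_DISCHARGING	0x1	/* stand-ins for the ACPI _BST bits */
#define STATE_CHARGING		0x2

static const char *battery_status(unsigned int state, int charged)
{
	if (state & STATE_DISCHARGING)
		return "Discharging";
	else if (state & STATE_CHARGING)
		return "Charging";
	else if (charged)		/* no activity and capacity is full */
		return "Full";
	return "Unknown";
}

int main(void)
{
	printf("%s\n", battery_status(STATE_DISCHARGING, 0));
	printf("%s\n", battery_status(STATE_CHARGING, 0));
	printf("%s\n", battery_status(0, 1));
	return 0;
}
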
@@ -2675,10 +2675,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

-	if(acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
+	/*
+	 * Persistence domain bits are hierarchical, if
+	 * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
+	 * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
+	 */
+	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
		set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
-
-	if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
+	else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
		set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
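
The if/else-if makes the two region flags mutually exclusive: flush-to-persistence at the CPU cache level implies it at the memory controller, so only the strongest advertised domain is recorded. A small sketch of that hierarchical-capability pattern (flag values are illustrative):

#include <stdio.h>

#define CAP_CACHE_FLUSH	(1u << 0)	/* persistence at the CPU cache level */
#define CAP_MEM_FLUSH	(1u << 1)	/* persistence at the memory controller */

#define REGION_PERSIST_CACHE	(1u << 0)
#define REGION_PERSIST_MEMCTRL	(1u << 1)

/* Record only the strongest persistence domain: cache implies memctrl. */
static unsigned int region_flags(unsigned int platform_cap)
{
	unsigned int flags = 0;

	if (platform_cap & CAP_CACHE_FLUSH)
		flags |= REGION_PERSIST_CACHE;
	else if (platform_cap & CAP_MEM_FLUSH)
		flags |= REGION_PERSIST_MEMCTRL;
	return flags;
}

int main(void)
{
	/* Both capabilities advertised: only the cache-level bit is set. */
	printf("flags = %#x\n", region_flags(CAP_CACHE_FLUSH | CAP_MEM_FLUSH));
	/* Only the weaker capability: the memctrl bit is set instead. */
	printf("flags = %#x\n", region_flags(CAP_MEM_FLUSH));
	return 0;
}
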
@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)
 */
int acpi_map_pxm_to_online_node(int pxm)
{
-	int node, n, dist, min_dist;
+	int node, min_node;

	node = acpi_map_pxm_to_node(pxm);

	if (node == NUMA_NO_NODE)
		node = 0;

+	min_node = node;
	if (!node_online(node)) {
-		min_dist = INT_MAX;
+		int min_dist = INT_MAX, dist, n;
+
		for_each_online_node(n) {
			dist = node_distance(node, n);
			if (dist < min_dist) {
				min_dist = dist;
-				node = n;
+				min_node = n;
			}
		}
	}

-	return node;
+	return min_node;
}
EXPORT_SYMBOL(acpi_map_pxm_to_online_node);

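
The bug this rewrite fixes is subtle: the old loop updated node while still using it as the reference for node_distance(), so later distance checks were measured against a node picked mid-scan rather than against the original offline node. Keeping the reference fixed and tracking the best candidate separately avoids that. A user-space sketch with a toy distance matrix (all data made up):

#include <limits.h>
#include <stdio.h>

#define NR_NODES 4

/* Toy SLIT-style distance matrix; row = from, column = to. */
static const int distance[NR_NODES][NR_NODES] = {
	{ 10, 20, 30, 40 },
	{ 20, 10, 25, 35 },
	{ 30, 25, 10, 15 },
	{ 40, 35, 15, 10 },
};

static const int online[NR_NODES] = { 1, 1, 0, 0 };	/* nodes 2,3 offline */

/* Map a (possibly offline) node to the nearest online one. */
static int nearest_online_node(int node)
{
	int min_node = node;

	if (!online[node]) {
		int min_dist = INT_MAX;

		for (int n = 0; n < NR_NODES; n++) {
			if (!online[n])
				continue;
			/* distance is always taken from the original node */
			if (distance[node][n] < min_dist) {
				min_dist = distance[node][n];
				min_node = n;	/* candidate, not the reference */
			}
		}
	}
	return min_node;
}

int main(void)
{
	printf("node 3 -> node %d\n", nearest_online_node(3));
	printf("node 1 -> node %d\n", nearest_online_node(1));
	return 0;
}
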
@@ -550,7 +550,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
	  .driver_data = board_ahci_yes_fbs },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
	  .driver_data = board_ahci_yes_fbs },
-	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
+	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
	  .driver_data = board_ahci_yes_fbs },
+	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
+	  .driver_data = board_ahci_yes_fbs },

	/* Promise */
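
Entries like these are matched by walking a sentinel-terminated vendor/device table, so the new 0x0645 line simply extends the table and the 644L board picks up the same board_ahci_yes_fbs data. A stripped-down sketch of such a lookup (IDs and board values are placeholders):

#include <stdio.h>

struct id_entry {
	unsigned short vendor, device;
	const char *driver_data;	/* board type in the real table */
};

#define VENDOR_TTI 0x1103		/* placeholder vendor ID */

static const struct id_entry id_table[] = {
	{ VENDOR_TTI, 0x0642, "board_yes_fbs" },	/* rocketraid 642L */
	{ VENDOR_TTI, 0x0645, "board_yes_fbs" },	/* rocketraid 644L */
	{ 0, 0, NULL },					/* sentinel terminator */
};

static const char *match(unsigned short vendor, unsigned short device)
{
	for (const struct id_entry *e = id_table; e->vendor; e++)
		if (e->vendor == vendor && e->device == device)
			return e->driver_data;
	return NULL;
}

int main(void)
{
	const char *board = match(VENDOR_TTI, 0x0645);

	printf("0x0645 -> %s\n", board ? board : "no match");
	return 0;
}
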
@@ -665,6 +665,16 @@ int ahci_stop_engine(struct ata_port *ap)
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

+	/*
+	 * Don't try to issue commands but return with ENODEV if the
+	 * AHCI controller is not available anymore (e.g. due to PCIe hot
+	 * unplugging). Otherwise a 500ms delay for each port is added.
+	 */
+	if (tmp == 0xffffffff) {
+		dev_err(ap->host->dev, "AHCI controller unavailable!\n");
+		return -ENODEV;
+	}
+
	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
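
Reading all ones from MMIO is the classic signature of a device that has gone away (a PCIe read to a missing device completes as 0xffffffff), so bailing out early avoids pointless polling and the per-port delays mentioned in the comment. A user-space sketch of the check (the fake register read stands in for readl()):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int device_present = 0;	/* flip to 1 to simulate a live controller */

/* Stand-in for readl(port_mmio + PORT_CMD) on a possibly-removed device. */
static uint32_t read_port_cmd(void)
{
	return device_present ? 0x00000011u : 0xffffffffu;
}

static int stop_engine(void)
{
	uint32_t tmp = read_port_cmd();

	/* All ones means the controller is gone (e.g. PCIe hot unplug). */
	if (tmp == 0xffffffffu) {
		fprintf(stderr, "controller unavailable\n");
		return -ENODEV;
	}

	/* ... would clear the START bit and wait for the engine here ... */
	return 0;
}

int main(void)
{
	printf("stop_engine() = %d\n", stop_engine());
	return 0;
}
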
@@ -340,7 +340,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
 * 2) regulator for controlling the targets power (optional)
 * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
 *    or for non devicetree enabled platforms a single clock
-* 4) phys (optional)
+* 4) phys (optional)
 *
 * RETURNS:
 * The allocated ahci_host_priv on success, otherwise an ERR_PTR value