Merge 4.17-rc3 into char-misc-next

We want the fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2018-04-30 05:05:54 -07:00
commit 720d690e36
396 changed files with 4219 additions and 2419 deletions

View File

@ -21,7 +21,7 @@ Required properties:
- interrupts : identifier to the device interrupt
- clocks : a list of phandle + clock-specifier pairs, one for each
entry in clock names.
- clocks-names :
- clock-names :
* "xtal" for external xtal clock identifier
* "pclk" for the bus core clock, either the clk81 clock or the gate clock
* "baud" for the source of the baudrate generator, can be either the xtal

View File

@ -24,7 +24,7 @@ Required properties:
- Must contain two elements for the extended variant of the IP
(marvell,armada-3700-uart-ext): "uart-tx" and "uart-rx",
respectively the UART TX interrupt and the UART RX interrupt. A
corresponding interrupts-names property must be defined.
corresponding interrupt-names property must be defined.
- For backward compatibility reasons, a single element interrupts
property is also supported for the standard variant of the IP,
containing only the UART sum interrupt. This form is deprecated

View File

@ -17,6 +17,8 @@ Required properties:
- "renesas,scifa-r8a7745" for R8A7745 (RZ/G1E) SCIFA compatible UART.
- "renesas,scifb-r8a7745" for R8A7745 (RZ/G1E) SCIFB compatible UART.
- "renesas,hscif-r8a7745" for R8A7745 (RZ/G1E) HSCIF compatible UART.
- "renesas,scif-r8a77470" for R8A77470 (RZ/G1C) SCIF compatible UART.
- "renesas,hscif-r8a77470" for R8A77470 (RZ/G1C) HSCIF compatible UART.
- "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
- "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
- "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.

View File

@ -28,7 +28,10 @@ Required properties:
- interrupts: one XHCI interrupt should be described here.
Optional properties:
- clocks: reference to a clock
- clocks: reference to the clocks
- clock-names: mandatory if there is a second clock, in this case
the name must be "core" for the first clock and "reg" for the
second one
- usb2-lpm-disable: indicate if we don't want to enable USB2 HW LPM
- usb3-lpm-capable: determines if platform is USB3 LPM capable
- quirk-broken-port-ped: set if the controller has broken port disable mechanism

View File

@ -17,17 +17,17 @@ an error is returned.
request_firmware
----------------
.. kernel-doc:: drivers/base/firmware_class.c
.. kernel-doc:: drivers/base/firmware_loader/main.c
:functions: request_firmware
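
As a concrete illustration of the synchronous request described above, a minimal driver-side snippet (the device pointer and the "vendor/blob.bin" name are hypothetical, not taken from this document):

	#include <linux/device.h>
	#include <linux/firmware.h>

	/* Hypothetical helper: synchronously load a blob for a device we own. */
	static int example_load_blob(struct device *dev)
	{
		const struct firmware *fw;
		int err;

		err = request_firmware(&fw, "vendor/blob.bin", dev);
		if (err)
			return err;	/* lookup failed, an error is returned */

		/* fw->data and fw->size stay valid until release_firmware() */
		dev_info(dev, "loaded %zu bytes of firmware\n", fw->size);

		release_firmware(fw);
		return 0;
	}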
request_firmware_direct
-----------------------
.. kernel-doc:: drivers/base/firmware_class.c
.. kernel-doc:: drivers/base/firmware_loader/main.c
:functions: request_firmware_direct
request_firmware_into_buf
-------------------------
.. kernel-doc:: drivers/base/firmware_class.c
.. kernel-doc:: drivers/base/firmware_loader/main.c
:functions: request_firmware_into_buf
Asynchronous firmware requests
@ -41,7 +41,7 @@ in atomic contexts.
request_firmware_nowait
-----------------------
.. kernel-doc:: drivers/base/firmware_class.c
.. kernel-doc:: drivers/base/firmware_loader/main.c
:functions: request_firmware_nowait
Special optimizations on reboot
@ -50,12 +50,12 @@ Special optimizations on reboot
Some devices have an optimization in place to enable the firmware to be
retained during system reboot. When such optimizations are used the driver
author must ensure the firmware is still available on resume from suspend,
this can be done with firmware_request_cache() insted of requesting for the
firmare to be loaded.
this can be done with firmware_request_cache() instead of requesting for the
firmware to be loaded.
firmware_request_cache()
-----------------------
.. kernel-doc:: drivers/base/firmware_class.c
------------------------
.. kernel-doc:: drivers/base/firmware_loader/main.c
:functions: firmware_request_cache
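
As a rough sketch of the optimization described above (the hook name and firmware path are made up for illustration), a driver can ask the loader to keep its image cached around suspend/resume instead of doing a full request:

	#include <linux/firmware.h>

	/*
	 * Hypothetical probe-time call: make sure "vendor/blob.bin" will be
	 * available from the firmware cache on resume, without loading it
	 * into this driver right now.
	 */
	static int example_cache_firmware(struct device *dev)
	{
		return firmware_request_cache(dev, "vendor/blob.bin");
	}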
request firmware API expected driver use

View File

@ -28,7 +28,7 @@ Device Drivers Base
.. kernel-doc:: drivers/base/node.c
:internal:
.. kernel-doc:: drivers/base/firmware_class.c
.. kernel-doc:: drivers/base/firmware_loader/main.c
:export:
.. kernel-doc:: drivers/base/transport_class.c

View File

@ -210,7 +210,7 @@ If the connector is dual-role capable, there may also be a switch for the data
role. USB Type-C Connector Class does not supply separate API for them. The
port drivers can use USB Role Class API with those.
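
A minimal sketch of such a port driver switching the data role; it assumes a struct usb_role_switch handle was already obtained (for example via usb_role_switch_get()), and the function name is purely illustrative:

	#include <linux/usb/role.h>

	/* Ask the role switch to present this port as a USB host. */
	static int example_set_host_role(struct usb_role_switch *sw)
	{
		return usb_role_switch_set_role(sw, USB_ROLE_HOST);
	}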
Illustration of the muxes behind a connector that supports an alternate mode:
Illustration of the muxes behind a connector that supports an alternate mode::
------------------------
| Connector |

View File

@ -9,8 +9,8 @@ i2c adapters present on your system at a given time. i2cdetect is part of
the i2c-tools package.
I2C device files are character device files with major device number 89
and a minor device number corresponding to the number assigned as
explained above. They should be called "i2c-%d" (i2c-0, i2c-1, ...,
i2c-10, ...). All 256 minor device numbers are reserved for i2c.
@ -23,11 +23,6 @@ First, you need to include these two headers:
#include <linux/i2c-dev.h>
#include <i2c/smbus.h>
(Please note that there are two files named "i2c-dev.h" out there. One is
distributed with the Linux kernel and the other one is included in the
source tree of i2c-tools. They used to be different in content but since 2012
they're identical. You should use "linux/i2c-dev.h").
Now, you have to decide which adapter you want to access. You should
inspect /sys/class/i2c-dev/ or run "i2cdetect -l" to decide this.
Adapter numbers are assigned somewhat dynamically, so you can not
@ -38,7 +33,7 @@ Next thing, open the device file, as follows:
int file;
int adapter_nr = 2; /* probably dynamically determined */
char filename[20];
snprintf(filename, 19, "/dev/i2c-%d", adapter_nr);
file = open(filename, O_RDWR);
if (file < 0) {
@ -72,8 +67,10 @@ the device supports them. Both are illustrated below.
/* res contains the read word */
}
/* Using I2C Write, equivalent of
i2c_smbus_write_word_data(file, reg, 0x6543) */
/*
* Using I2C Write, equivalent of
* i2c_smbus_write_word_data(file, reg, 0x6543)
*/
buf[0] = reg;
buf[1] = 0x43;
buf[2] = 0x65;
@ -140,14 +137,14 @@ ioctl(file, I2C_RDWR, struct i2c_rdwr_ioctl_data *msgset)
set in each message, overriding the values set with the above ioctl's.
ioctl(file, I2C_SMBUS, struct i2c_smbus_ioctl_data *args)
Not meant to be called directly; instead, use the access functions
below.
If possible, use the provided i2c_smbus_* methods described below instead
of issuing direct ioctls.
You can do plain i2c transactions by using read(2) and write(2) calls.
You do not need to pass the address byte; instead, set it through
ioctl I2C_SLAVE before you try to access the device.
You can do SMBus level transactions (see documentation file smbus-protocol
for details) through the following functions:
__s32 i2c_smbus_write_quick(int file, __u8 value);
__s32 i2c_smbus_read_byte(int file);
@ -158,7 +155,7 @@ for details) through the following functions:
__s32 i2c_smbus_write_word_data(int file, __u8 command, __u16 value);
__s32 i2c_smbus_process_call(int file, __u8 command, __u16 value);
__s32 i2c_smbus_read_block_data(int file, __u8 command, __u8 *values);
__s32 i2c_smbus_write_block_data(int file, __u8 command, __u8 length,
__u8 *values);
All these transactions return -1 on failure; you can read errno to see
what happened. The 'write' transactions return 0 on success; the
@ -166,10 +163,9 @@ what happened. The 'write' transactions return 0 on success; the
returns the number of values read. The block buffers need not be longer
than 32 bytes.
The above functions are all inline functions, that resolve to calls to
the i2c_smbus_access function, that on its turn calls a specific ioctl
with the data in a specific format. Read the source code if you
want to know what happens behind the screens.
The above functions are made available by linking against the libi2c library,
which is provided by the i2c-tools project. See:
https://git.kernel.org/pub/scm/utils/i2c-tools/i2c-tools.git/.
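
Putting the calls above together, a minimal user-space example; the bus number, slave address 0x48 and register 0x00 are made-up values, and the program must be linked against libi2c (e.g. with -li2c):

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>
	#include <linux/i2c-dev.h>
	#include <i2c/smbus.h>

	int main(void)
	{
		int file = open("/dev/i2c-1", O_RDWR);

		if (file < 0) {
			perror("open /dev/i2c-1");
			return 1;
		}

		/* Select the (hypothetical) slave at 0x48 for all further calls. */
		if (ioctl(file, I2C_SLAVE, 0x48) < 0) {
			perror("ioctl I2C_SLAVE");
			return 1;
		}

		__s32 res = i2c_smbus_read_word_data(file, 0x00);
		if (res < 0)
			fprintf(stderr, "read failed: %s\n", strerror(errno));
		else
			printf("register 0x00 = 0x%04x\n", (unsigned int)res);

		close(file);
		return res < 0 ? 1 : 0;
	}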
Implementation details

View File

@ -217,7 +217,6 @@ Code Seq#(hex) Include File Comments
'd' 02-40 pcmcia/ds.h conflict!
'd' F0-FF linux/digi1.h
'e' all linux/digi1.h conflict!
'e' 00-1F drivers/net/irda/irtty-sir.h conflict!
'f' 00-1F linux/ext2_fs.h conflict!
'f' 00-1F linux/ext3_fs.h conflict!
'f' 00-0F fs/jfs/jfs_dinode.h conflict!
@ -247,7 +246,6 @@ Code Seq#(hex) Include File Comments
'm' all linux/synclink.h conflict!
'm' 00-19 drivers/message/fusion/mptctl.h conflict!
'm' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict!
'm' 00-1F net/irda/irmod.h conflict!
'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c
'n' 80-8F uapi/linux/nilfs2_api.h NILFS2
'n' E0-FF linux/matroxfb.h matroxfb

View File

@ -2126,18 +2126,3 @@ max_dgram_qlen - INTEGER
Default: 10
UNDOCUMENTED:
/proc/sys/net/irda/*
fast_poll_increase FIXME
warn_noreply_time FIXME
discovery_slots FIXME
slot_timeout FIXME
max_baud_rate FIXME
discovery_timeout FIXME
lap_keepalive_time FIXME
max_noreply_time FIXME
max_tx_data_size FIXME
max_tx_window FIXME
min_tx_turn_time FIXME

View File

@ -168,7 +168,7 @@ update on the CPUs, as discussed below:
[Please bear in mind that the kernel requests the microcode images from
userspace, using the request_firmware() function defined in
drivers/base/firmware_class.c]
drivers/base/firmware_loader/main.c]
a. When all the CPUs are identical:

View File

@ -157,8 +157,5 @@ memory management. See ``include/sound/sndmagic.h`` for complete list of them. M
OSS sound drivers have their magic numbers constructed from the soundcard PCI
ID - these are not listed here as well.
IrDA subsystem also uses large number of own magic numbers, see
``include/net/irda/irda.h`` for a complete list of them.
HFS is another larger user of magic numbers - you can find them in
``fs/hfs/hfs.h``.

View File

@ -461,9 +461,17 @@ of ftrace. Here is a list of some of the key files:
and ticks at the same rate as the hardware clocksource.
boot:
Same as mono. Used to be a separate clock which accounted
for the time spent in suspend while CLOCK_MONOTONIC did
not.
This is the boot clock (CLOCK_BOOTTIME) and is based on the
fast monotonic clock, but also accounts for time spent in
suspend. Since the clock access is designed for use in
tracing in the suspend path, some side effects are possible
if clock is accessed after the suspend time is accounted before
the fast mono clock is updated. In this case, the clock update
appears to happen slightly sooner than it normally would have.
Also on 32-bit systems, it's possible that the 64-bit boot offset
sees a partial update. These effects are rare and post
processing should be able to handle them. See comments in the
ktime_get_boot_fast_ns() function for more information.
To set a clock, simply echo the clock name into this file::
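
A small C equivalent of that echo (a sketch only; it assumes tracefs is exposed at the usual /sys/kernel/debug/tracing mount point):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/debug/tracing/trace_clock", O_WRONLY);

		if (fd < 0 || write(fd, "boot", 4) != 4) {
			perror("trace_clock");
			return 1;
		}

		close(fd);
		return 0;
	}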

View File

@ -1960,6 +1960,9 @@ ARM 32-bit VFP control registers have the following id bit patterns:
ARM 64-bit FP registers have the following id bit patterns:
0x4030 0000 0012 0 <regno:12>
ARM firmware pseudo-registers have the following bit pattern:
0x4030 0000 0014 <regno:16>
arm64 registers are mapped using the lower 32 bits. The upper 16 of
that is the register group type, or coprocessor number:
@ -1976,6 +1979,9 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:
arm64 system registers have the following id bit patterns:
0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
arm64 firmware pseudo-registers have the following bit pattern:
0x6030 0000 0014 <regno:16>
MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
the register group type:
@ -2510,7 +2516,8 @@ Possible features:
and execute guest code when KVM_RUN is called.
- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
backward compatible with v0.2) for the CPU.
Depends on KVM_CAP_ARM_PSCI_0_2.
- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
Depends on KVM_CAP_ARM_PMU_V3.
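
For illustration, requesting the KVM_ARM_VCPU_PSCI_0_2 feature from user space might look like the snippet below (a sketch; vm_fd and vcpu_fd are assumed to come from KVM_CREATE_VM and KVM_CREATE_VCPU):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Initialize a newly created vCPU with PSCI v0.2 emulation enabled. */
	static int example_vcpu_init_with_psci(int vm_fd, int vcpu_fd)
	{
		struct kvm_vcpu_init init;

		/* Let KVM fill in the preferred target, then add the feature bit. */
		if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
			return -1;

		init.features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;

		return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
	}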

View File

@ -0,0 +1,30 @@
KVM implements the PSCI (Power State Coordination Interface)
specification in order to provide services such as CPU on/off, reset
and power-off to the guest.
The PSCI specification is regularly updated to provide new features,
and KVM implements these updates if they make sense from a virtualization
point of view.
This means that a guest booted on two different versions of KVM can
observe two different "firmware" revisions. This could cause issues if
a given guest is tied to a particular PSCI revision (unlikely), or if
a migration causes a different PSCI version to be exposed out of the
blue to an unsuspecting guest.
In order to remedy this situation, KVM exposes a set of "firmware
pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
interface. These registers can be saved/restored by userspace, and set
to a convenient value if required.
The following register is defined:
* KVM_REG_ARM_PSCI_VERSION:
- Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
(and thus has already been initialized)
- Returns the current PSCI version on GET_ONE_REG (defaulting to the
highest PSCI version implemented by KVM and compatible with v0.2)
- Allows any PSCI version implemented by KVM and compatible with
v0.2 to be set with SET_ONE_REG
- Affects the whole VM (even if the register view is per-vcpu)
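
For illustration, a user-space read of that register could look like the sketch below (vcpu_fd is assumed to be a vCPU file descriptor initialized with the KVM_ARM_VCPU_PSCI_0_2 feature):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Fetch the PSCI version KVM currently exposes to this vCPU. */
	static int example_get_psci_version(int vcpu_fd, uint64_t *version)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_ARM_PSCI_VERSION,	/* KVM_REG_ARM_FW_REG(0) */
			.addr = (uint64_t)(uintptr_t)version,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}

Writing the value back with KVM_SET_ONE_REG before resuming a migrated guest is the save/restore flow described above.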

View File

@ -564,8 +564,9 @@ S: Maintained
F: drivers/media/dvb-frontends/af9033*
AFFS FILE SYSTEM
M: David Sterba <dsterba@suse.com>
L: linux-fsdevel@vger.kernel.org
S: Orphan
S: Odd Fixes
F: Documentation/filesystems/affs.txt
F: fs/affs/
@ -905,6 +906,8 @@ ANDROID ION DRIVER
M: Laura Abbott <labbott@redhat.com>
M: Sumit Semwal <sumit.semwal@linaro.org>
L: devel@driverdev.osuosl.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
S: Supported
F: drivers/staging/android/ion
F: drivers/staging/android/uapi/ion.h
@ -1208,7 +1211,6 @@ F: drivers/*/*alpine*
ARM/ARTPEC MACHINE SUPPORT
M: Jesper Nilsson <jesper.nilsson@axis.com>
M: Lars Persson <lars.persson@axis.com>
M: Niklas Cassel <niklas.cassel@axis.com>
S: Maintained
L: linux-arm-kernel@axis.com
F: arch/arm/mach-artpec
@ -2617,7 +2619,7 @@ S: Maintained
F: drivers/net/hamradio/baycom*
BCACHE (BLOCK LAYER CACHE)
M: Michael Lyle <mlyle@lyle.org>
M: Coly Li <colyli@suse.de>
M: Kent Overstreet <kent.overstreet@gmail.com>
L: linux-bcache@vger.kernel.org
W: http://bcache.evilpiepirate.org
@ -7411,16 +7413,6 @@ S: Obsolete
F: include/uapi/linux/ipx.h
F: drivers/staging/ipx/
IRDA SUBSYSTEM
M: Samuel Ortiz <samuel@sortiz.org>
L: irda-users@lists.sourceforge.net (subscribers-only)
L: netdev@vger.kernel.org
W: http://irda.sourceforge.net/
S: Obsolete
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sameo/irda-2.6.git
F: Documentation/networking/irda.txt
F: drivers/staging/irda/
IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
M: Marc Zyngier <marc.zyngier@arm.com>
S: Maintained
@ -7753,7 +7745,7 @@ F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE FOR ARM (KVM/arm)
M: Christoffer Dall <christoffer.dall@linaro.org>
M: Christoffer Dall <christoffer.dall@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
@ -7767,7 +7759,7 @@ F: virt/kvm/arm/
F: include/kvm/arm_*
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
M: Christoffer Dall <christoffer.dall@linaro.org>
M: Christoffer Dall <christoffer.dall@arm.com>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
@ -10909,7 +10901,6 @@ F: drivers/pci/host/
F: drivers/pci/dwc/
PCIE DRIVER FOR AXIS ARTPEC
M: Niklas Cassel <niklas.cassel@axis.com>
M: Jesper Nilsson <jesper.nilsson@axis.com>
L: linux-arm-kernel@axis.com
L: linux-pci@vger.kernel.org
@ -13965,7 +13956,7 @@ THUNDERBOLT DRIVER
M: Andreas Noever <andreas.noever@gmail.com>
M: Michael Jamet <michael.jamet@intel.com>
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Yehezkel Bernat <yehezkel.bernat@intel.com>
M: Yehezkel Bernat <YehezkelShB@gmail.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
S: Maintained
F: Documentation/admin-guide/thunderbolt.rst
@ -13975,7 +13966,7 @@ F: include/linux/thunderbolt.h
THUNDERBOLT NETWORK DRIVER
M: Michael Jamet <michael.jamet@intel.com>
M: Mika Westerberg <mika.westerberg@linux.intel.com>
M: Yehezkel Bernat <yehezkel.bernat@intel.com>
M: Yehezkel Bernat <YehezkelShB@gmail.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/thunderbolt.c

View File

@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 17
SUBLEVEL = 0
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME = Fearless Coyote
# *DOCUMENTATION*

View File

@ -134,37 +134,37 @@ mux {
function = "gmii";
groups = "gmii_gmac0_grp";
};
/* Settings come from OpenWRT */
/* Settings come from OpenWRT, pins on SL3516 */
conf0 {
pins = "R8 GMAC0 RXDV", "U11 GMAC1 RXDV";
pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV";
skew-delay = <0>;
};
conf1 {
pins = "T8 GMAC0 RXC", "T11 GMAC1 RXC";
pins = "Y7 GMAC0 RXC", "Y11 GMAC1 RXC";
skew-delay = <15>;
};
conf2 {
pins = "P8 GMAC0 TXEN", "V11 GMAC1 TXEN";
pins = "T8 GMAC0 TXEN", "W11 GMAC1 TXEN";
skew-delay = <7>;
};
conf3 {
pins = "V7 GMAC0 TXC";
pins = "U8 GMAC0 TXC";
skew-delay = <11>;
};
conf4 {
pins = "P10 GMAC1 TXC";
pins = "V11 GMAC1 TXC";
skew-delay = <10>;
};
conf5 {
/* The data lines all have default skew */
pins = "U8 GMAC0 RXD0", "V8 GMAC0 RXD1",
"P9 GMAC0 RXD2", "R9 GMAC0 RXD3",
"U7 GMAC0 TXD0", "T7 GMAC0 TXD1",
"R7 GMAC0 TXD2", "P7 GMAC0 TXD3",
"R11 GMAC1 RXD0", "P11 GMAC1 RXD1",
"V12 GMAC1 RXD2", "U12 GMAC1 RXD3",
"R10 GMAC1 TXD0", "T10 GMAC1 TXD1",
"U10 GMAC1 TXD2", "V10 GMAC1 TXD3";
pins = "W8 GMAC0 RXD0", "V9 GMAC0 RXD1",
"Y8 GMAC0 RXD2", "U9 GMAC0 RXD3",
"T7 GMAC0 TXD0", "U6 GMAC0 TXD1",
"V7 GMAC0 TXD2", "U7 GMAC0 TXD3",
"Y12 GMAC1 RXD0", "V12 GMAC1 RXD1",
"T11 GMAC1 RXD2", "W12 GMAC1 RXD3",
"U10 GMAC1 TXD0", "Y10 GMAC1 TXD1",
"W10 GMAC1 TXD2", "T9 GMAC1 TXD3";
skew-delay = <7>;
};
/* Set up drive strength on GMAC0 to 16 mA */

View File

@ -163,10 +163,10 @@ cm1_clockdomains: clockdomains {
cm2: cm2@8000 {
compatible = "ti,omap4-cm2", "simple-bus";
reg = <0x8000 0x3000>;
reg = <0x8000 0x2000>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x8000 0x3000>;
ranges = <0 0x8000 0x2000>;
cm2_clocks: clocks {
#address-cells = <1>;
@ -250,11 +250,11 @@ counter32k: counter@4000 {
prm: prm@6000 {
compatible = "ti,omap4-prm";
reg = <0x6000 0x3000>;
reg = <0x6000 0x2000>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x6000 0x3000>;
ranges = <0 0x6000 0x2000>;
prm_clocks: clocks {
#address-cells = <1>;

View File

@ -1,6 +1,7 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_USER_NS=y
CONFIG_RELAY=y
@ -12,15 +13,21 @@ CONFIG_ARCH_GEMINI=y
CONFIG_PCI=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
CONFIG_CMA=y
CONFIG_CMDLINE="console=ttyS0,115200n8"
CONFIG_KEXEC=y
CONFIG_BINFMT_MISC=y
CONFIG_PM=y
CONFIG_NET=y
CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_STAA=y
@ -33,6 +40,11 @@ CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_ATA=y
CONFIG_PATA_FTIDE010=y
CONFIG_NETDEVICES=y
CONFIG_GEMINI_ETHERNET=y
CONFIG_MDIO_BITBANG=y
CONFIG_MDIO_GPIO=y
CONFIG_REALTEK_PHY=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
@ -43,9 +55,19 @@ CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_I2C_GPIO=y
CONFIG_SPI=y
CONFIG_SPI_GPIO=y
CONFIG_SENSORS_GPIO_FAN=y
CONFIG_SENSORS_LM75=y
CONFIG_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_GEMINI_WATCHDOG=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_ILITEK_IL9322=y
CONFIG_DRM_TVE200=y
CONFIG_LOGO=y
CONFIG_USB=y
CONFIG_USB_MON=y
CONFIG_USB_FOTG210_HCD=y
@ -54,6 +76,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_DMADEVICES=y

View File

@ -57,6 +57,7 @@ CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_DENALI_DT=y
CONFIG_MTD_SPI_NOR=y
# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
CONFIG_SPI_CADENCE_QUADSPI=y
CONFIG_OF_OVERLAY=y
CONFIG_OF_CONFIGFS=y

View File

@ -77,6 +77,9 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;
int max_vcpus;
/* Mandated version of PSCI */
u32 psci_version;
};
#define KVM_NR_MEM_OBJS 40

View File

@ -195,6 +195,12 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_VFP_FPINST 0x1009
#define KVM_REG_ARM_VFP_FPINST2 0x100A
/* KVM-as-firmware specific pseudo-registers */
#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1

View File

@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
@ -176,6 +177,7 @@ static unsigned long num_core_regs(void)
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
+ kvm_arm_get_fw_num_regs(vcpu)
+ NUM_TIMER_REGS;
}
@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices++;
}
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
if (ret)
return ret;
uindices += kvm_arm_get_fw_num_regs(vcpu);
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_get_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_set_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);

View File

@ -243,8 +243,4 @@ arch/arm/mach-omap2/pm-asm-offsets.s: arch/arm/mach-omap2/pm-asm-offsets.c
include/generated/ti-pm-asm-offsets.h: arch/arm/mach-omap2/pm-asm-offsets.s FORCE
$(call filechk,offsets,__TI_PM_ASM_OFFSETS_H__)
# For rule to generate ti-emif-asm-offsets.h dependency
include drivers/memory/Makefile.asm-offsets
arch/arm/mach-omap2/sleep33xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
arch/arm/mach-omap2/sleep43xx.o: include/generated/ti-pm-asm-offsets.h include/generated/ti-emif-asm-offsets.h
$(obj)/sleep33xx.o $(obj)/sleep43xx.o: include/generated/ti-pm-asm-offsets.h

View File

@ -7,9 +7,12 @@
#include <linux/kbuild.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/ti-emif-sram.h>
int main(void)
{
ti_emif_asm_offsets();
DEFINE(AMX3_PM_WFI_FLAGS_OFFSET,
offsetof(struct am33xx_pm_sram_data, wfi_flags));
DEFINE(AMX3_PM_L2_AUX_CTRL_VAL_OFFSET,

View File

@ -6,7 +6,6 @@
* Dave Gerlach, Vaibhav Bedia
*/
#include <generated/ti-emif-asm-offsets.h>
#include <generated/ti-pm-asm-offsets.h>
#include <linux/linkage.h>
#include <linux/ti-emif-sram.h>

View File

@ -6,7 +6,6 @@
* Dave Gerlach, Vaibhav Bedia
*/
#include <generated/ti-emif-asm-offsets.h>
#include <generated/ti-pm-asm-offsets.h>
#include <linux/linkage.h>
#include <linux/ti-emif-sram.h>

View File

@ -427,9 +427,9 @@ static struct gpiod_lookup_table jive_wm8750_gpiod_table = {
.dev_id = "spi_gpio",
.table = {
GPIO_LOOKUP("GPIOB", 4,
"gpio-sck", GPIO_ACTIVE_HIGH),
"sck", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("GPIOB", 9,
"gpio-mosi", GPIO_ACTIVE_HIGH),
"mosi", GPIO_ACTIVE_HIGH),
GPIO_LOOKUP("GPIOH", 10,
"cs", GPIO_ACTIVE_HIGH),
{ },

View File

@ -56,7 +56,11 @@ KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
KBUILD_AFLAGS += $(call cc-option,-mabi=lp64)
ifeq ($(cc-name),clang)
KBUILD_CFLAGS += -DCONFIG_ARCH_SUPPORTS_INT128
else
KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
endif
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian

View File

@ -212,3 +212,7 @@ &uart_AO {
pinctrl-0 = <&uart_ao_a_pins>;
pinctrl-names = "default";
};
&usb0 {
status = "okay";
};

View File

@ -271,3 +271,15 @@ &uart_AO {
pinctrl-0 = <&uart_ao_a_pins>;
pinctrl-names = "default";
};
&usb0 {
status = "okay";
};
&usb2_phy0 {
/*
* even though the schematics don't show it:
* HDMI_5V is also used as supply for the USB VBUS.
*/
phy-supply = <&hdmi_5v>;
};

View File

@ -215,3 +215,7 @@ &uart_AO {
pinctrl-0 = <&uart_ao_a_pins>;
pinctrl-names = "default";
};
&usb0 {
status = "okay";
};

View File

@ -185,3 +185,7 @@ &uart_AO {
pinctrl-0 = <&uart_ao_a_pins>;
pinctrl-names = "default";
};
&usb0 {
status = "okay";
};

View File

@ -20,6 +20,67 @@ secmon_reserved_alt: secmon@5000000 {
no-map;
};
};
soc {
usb0: usb@c9000000 {
status = "disabled";
compatible = "amlogic,meson-gxl-dwc3";
#address-cells = <2>;
#size-cells = <2>;
ranges;
clocks = <&clkc CLKID_USB>;
clock-names = "usb_general";
resets = <&reset RESET_USB_OTG>;
reset-names = "usb_otg";
dwc3: dwc3@c9000000 {
compatible = "snps,dwc3";
reg = <0x0 0xc9000000 0x0 0x100000>;
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
dr_mode = "host";
maximum-speed = "high-speed";
snps,dis_u2_susphy_quirk;
phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>;
};
};
};
};
&apb {
usb2_phy0: phy@78000 {
compatible = "amlogic,meson-gxl-usb2-phy";
#phy-cells = <0>;
reg = <0x0 0x78000 0x0 0x20>;
clocks = <&clkc CLKID_USB>;
clock-names = "phy";
resets = <&reset RESET_USB_OTG>;
reset-names = "phy";
status = "okay";
};
usb2_phy1: phy@78020 {
compatible = "amlogic,meson-gxl-usb2-phy";
#phy-cells = <0>;
reg = <0x0 0x78020 0x0 0x20>;
clocks = <&clkc CLKID_USB>;
clock-names = "phy";
resets = <&reset RESET_USB_OTG>;
reset-names = "phy";
status = "okay";
};
usb3_phy: phy@78080 {
compatible = "amlogic,meson-gxl-usb3-phy";
#phy-cells = <0>;
reg = <0x0 0x78080 0x0 0x20>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clkc CLKID_USB>, <&clkc_AO CLKID_AO_CEC_32K>;
clock-names = "phy", "peripheral";
resets = <&reset RESET_USB_OTG>, <&reset RESET_USB_OTG>;
reset-names = "phy", "peripheral";
status = "okay";
};
};
&ethmac {

View File

@ -406,3 +406,7 @@ &saradc {
status = "okay";
vref-supply = <&vddio_ao18>;
};
&usb0 {
status = "okay";
};

View File

@ -80,6 +80,19 @@ cpu7: cpu@103 {
};
};
&apb {
usb2_phy2: phy@78040 {
compatible = "amlogic,meson-gxl-usb2-phy";
#phy-cells = <0>;
reg = <0x0 0x78040 0x0 0x20>;
clocks = <&clkc CLKID_USB>;
clock-names = "phy";
resets = <&reset RESET_USB_OTG>;
reset-names = "phy";
status = "okay";
};
};
&clkc_AO {
compatible = "amlogic,meson-gxm-aoclkc", "amlogic,meson-gx-aoclkc";
};
@ -100,3 +113,7 @@ &vpu {
&hdmi_tx {
compatible = "amlogic,meson-gxm-dw-hdmi", "amlogic,meson-gx-dw-hdmi";
};
&dwc3 {
phys = <&usb3_phy>, <&usb2_phy0>, <&usb2_phy1>, <&usb2_phy2>;
};

View File

@ -56,8 +56,6 @@ mb_fixed_3v3: mcc-sb-3v3 {
gpio_keys {
compatible = "gpio-keys";
#address-cells = <1>;
#size-cells = <0>;
power-button {
debounce_interval = <50>;

View File

@ -36,11 +36,11 @@ sata {
#size-cells = <1>;
ranges = <0x0 0x0 0x67d00000 0x00800000>;
sata0: ahci@210000 {
sata0: ahci@0 {
compatible = "brcm,iproc-ahci", "generic-ahci";
reg = <0x00210000 0x1000>;
reg = <0x00000000 0x1000>;
reg-names = "ahci";
interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@ -52,9 +52,9 @@ sata0_port0: sata-port@0 {
};
};
sata_phy0: sata_phy@212100 {
sata_phy0: sata_phy@2100 {
compatible = "brcm,iproc-sr-sata-phy";
reg = <0x00212100 0x1000>;
reg = <0x00002100 0x1000>;
reg-names = "phy";
#address-cells = <1>;
#size-cells = <0>;
@ -66,11 +66,11 @@ sata0_phy0: sata-phy@0 {
};
};
sata1: ahci@310000 {
sata1: ahci@10000 {
compatible = "brcm,iproc-ahci", "generic-ahci";
reg = <0x00310000 0x1000>;
reg = <0x00010000 0x1000>;
reg-names = "ahci";
interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@ -82,9 +82,9 @@ sata1_port0: sata-port@0 {
};
};
sata_phy1: sata_phy@312100 {
sata_phy1: sata_phy@12100 {
compatible = "brcm,iproc-sr-sata-phy";
reg = <0x00312100 0x1000>;
reg = <0x00012100 0x1000>;
reg-names = "phy";
#address-cells = <1>;
#size-cells = <0>;
@ -96,11 +96,11 @@ sata1_phy0: sata-phy@0 {
};
};
sata2: ahci@120000 {
sata2: ahci@20000 {
compatible = "brcm,iproc-ahci", "generic-ahci";
reg = <0x00120000 0x1000>;
reg = <0x00020000 0x1000>;
reg-names = "ahci";
interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@ -112,9 +112,9 @@ sata2_port0: sata-port@0 {
};
};
sata_phy2: sata_phy@122100 {
sata_phy2: sata_phy@22100 {
compatible = "brcm,iproc-sr-sata-phy";
reg = <0x00122100 0x1000>;
reg = <0x00022100 0x1000>;
reg-names = "phy";
#address-cells = <1>;
#size-cells = <0>;
@ -126,11 +126,11 @@ sata2_phy0: sata-phy@0 {
};
};
sata3: ahci@130000 {
sata3: ahci@30000 {
compatible = "brcm,iproc-ahci", "generic-ahci";
reg = <0x00130000 0x1000>;
reg = <0x00030000 0x1000>;
reg-names = "ahci";
interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@ -142,9 +142,9 @@ sata3_port0: sata-port@0 {
};
};
sata_phy3: sata_phy@132100 {
sata_phy3: sata_phy@32100 {
compatible = "brcm,iproc-sr-sata-phy";
reg = <0x00132100 0x1000>;
reg = <0x00032100 0x1000>;
reg-names = "phy";
#address-cells = <1>;
#size-cells = <0>;
@ -156,11 +156,11 @@ sata3_phy0: sata-phy@0 {
};
};
sata4: ahci@330000 {
sata4: ahci@100000 {
compatible = "brcm,iproc-ahci", "generic-ahci";
reg = <0x00330000 0x1000>;
reg = <0x00100000 0x1000>;
reg-names = "ahci";
interrupts = <GIC_SPI 351 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@ -172,9 +172,9 @@ sata4_port0: sata-port@0 {
};
};
sata_phy4: sata_phy@332100 {
sata_phy4: sata_phy@102100 {
compatible = "brcm,iproc-sr-sata-phy";
reg = <0x00332100 0x1000>;
reg = <0x00102100 0x1000>;
reg-names = "phy";
#address-cells = <1>;
#size-cells = <0>;
@ -186,11 +186,11 @@ sata4_phy0: sata-phy@0 {
};
};
sata5: ahci@400000 {
sata5: ahci@110000 {
compatible = "brcm,iproc-ahci", "generic-ahci";
reg = <0x00400000 0x1000>;
reg = <0x00110000 0x1000>;
reg-names = "ahci";
interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@ -202,9 +202,9 @@ sata5_port0: sata-port@0 {
};
};
sata_phy5: sata_phy@402100 {
sata_phy5: sata_phy@112100 {
compatible = "brcm,iproc-sr-sata-phy";
reg = <0x00402100 0x1000>;
reg = <0x00112100 0x1000>;
reg-names = "phy";
#address-cells = <1>;
#size-cells = <0>;
@ -216,11 +216,11 @@ sata5_phy0: sata-phy@0 {
};
};
sata6: ahci@410000 {
sata6: ahci@120000 {
compatible = "brcm,iproc-ahci", "generic-ahci";
reg = <0x00410000 0x1000>;
reg = <0x00120000 0x1000>;
reg-names = "ahci";
interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@ -232,9 +232,9 @@ sata6_port0: sata-port@0 {
};
};
sata_phy6: sata_phy@412100 {
sata_phy6: sata_phy@122100 {
compatible = "brcm,iproc-sr-sata-phy";
reg = <0x00412100 0x1000>;
reg = <0x00122100 0x1000>;
reg-names = "phy";
#address-cells = <1>;
#size-cells = <0>;
@ -246,11 +246,11 @@ sata6_phy0: sata-phy@0 {
};
};
sata7: ahci@420000 {
sata7: ahci@130000 {
compatible = "brcm,iproc-ahci", "generic-ahci";
reg = <0x00420000 0x1000>;
reg = <0x00130000 0x1000>;
reg-names = "ahci";
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
status = "disabled";
@ -262,9 +262,9 @@ sata7_port0: sata-port@0 {
};
};
sata_phy7: sata_phy@422100 {
sata_phy7: sata_phy@132100 {
compatible = "brcm,iproc-sr-sata-phy";
reg = <0x00422100 0x1000>;
reg = <0x00132100 0x1000>;
reg-names = "phy";
#address-cells = <1>;
#size-cells = <0>;

View File

@ -75,6 +75,9 @@ struct kvm_arch {
/* Interrupt controller */
struct vgic_dist vgic;
/* Mandated version of PSCI */
u32 psci_version;
};
#define KVM_NR_MEM_OBJS 40

View File

@ -39,7 +39,7 @@ struct mod_arch_specific {
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
Elf64_Sym *sym);
u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val);
u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val);
#ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base;

View File

@ -230,7 +230,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
}
}
extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
extern void __sync_icache_dcache(pte_t pteval);
/*
* PTE bits configuration in the presence of hardware Dirty Bit Management
@ -253,7 +253,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t old_pte;
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
__sync_icache_dcache(pte, addr);
__sync_icache_dcache(pte);
/*
* If the existing pte is valid, check for potential race with

View File

@ -206,6 +206,12 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
/* KVM-as-firmware specific pseudo-registers */
#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1

View File

@ -868,6 +868,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
static const struct midr_range kpti_safe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
{ /* sentinel */ }
};
char const *str = "command line option";

View File

@ -43,7 +43,7 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
}
#ifdef CONFIG_ARM64_ERRATUM_843419
u64 module_emit_adrp_veneer(struct module *mod, void *loc, u64 val)
u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val)
{
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
&mod->arch.init;

View File

@ -215,7 +215,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
insn &= ~BIT(31);
} else {
/* out of range for ADR -> emit a veneer */
val = module_emit_adrp_veneer(mod, place, val & ~0xfff);
val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
if (!val)
return -ENOEXEC;
insn = aarch64_insn_gen_branch_imm((u64)place, val,

View File

@ -25,6 +25,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
@ -249,15 +250,20 @@ static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
switch (note_type) {
case NT_ARM_HW_BREAK:
if (idx < ARM_MAX_BRP)
bp = tsk->thread.debug.hbp_break[idx];
if (idx >= ARM_MAX_BRP)
goto out;
idx = array_index_nospec(idx, ARM_MAX_BRP);
bp = tsk->thread.debug.hbp_break[idx];
break;
case NT_ARM_HW_WATCH:
if (idx < ARM_MAX_WRP)
bp = tsk->thread.debug.hbp_watch[idx];
if (idx >= ARM_MAX_WRP)
goto out;
idx = array_index_nospec(idx, ARM_MAX_WRP);
bp = tsk->thread.debug.hbp_watch[idx];
break;
}
out:
return bp;
}
@ -1458,9 +1464,7 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
{
int ret;
u32 kdata;
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
/* Watchpoint */
if (num < 0) {
ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
@ -1471,7 +1475,6 @@ static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
} else {
ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
}
set_fs(old_fs);
if (!ret)
ret = put_user(kdata, data);
@ -1484,7 +1487,6 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
{
int ret;
u32 kdata = 0;
mm_segment_t old_fs = get_fs();
if (num == 0)
return 0;
@ -1493,12 +1495,10 @@ static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
if (ret)
return ret;
set_fs(KERNEL_DS);
if (num < 0)
ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
else
ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
set_fs(old_fs);
return ret;
}

View File

@ -277,7 +277,8 @@ void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
* If we were single stepping, we want to get the step exception after
* we return from the trap.
*/
user_fastforward_single_step(current);
if (user_mode(regs))
user_fastforward_single_step(current);
}
static LIST_HEAD(undef_hook);

View File

@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/kvm.h>
@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
+ NUM_TIMER_REGS;
+ kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
}
/**
@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
uindices++;
}
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
if (ret)
return ret;
uindices += kvm_arm_get_fw_num_regs(vcpu);
ret = copy_timer_indices(vcpu, uindices);
if (ret)
return ret;
@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return get_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_get_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return get_timer_reg(vcpu, reg);
@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
return set_core_reg(vcpu, reg);
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
return kvm_arm_set_fw_reg(vcpu, reg);
if (is_timer_reg(reg->id))
return set_timer_reg(vcpu, reg);

View File

@ -996,14 +996,12 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
if (id == SYS_ID_AA64PFR0_EL1) {
if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n",
task_pid_nr(current));
kvm_debug("SVE unsupported for guests, suppressing\n");
val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
} else if (id == SYS_ID_AA64MMFR1_EL1) {
if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
pr_err_once("kvm [%i]: LORegions unsupported for guests, suppressing\n",
task_pid_nr(current));
kvm_debug("LORegions unsupported for guests, suppressing\n");
val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
}

View File

@ -19,5 +19,9 @@ CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
-fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
-fcall-saved-x18 -fomit-frame-pointer
CFLAGS_REMOVE_atomic_ll_sc.o := -pg
GCOV_PROFILE_atomic_ll_sc.o := n
KASAN_SANITIZE_atomic_ll_sc.o := n
KCOV_INSTRUMENT_atomic_ll_sc.o := n
UBSAN_SANITIZE_atomic_ll_sc.o := n
lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o

View File

@ -58,7 +58,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
flush_ptrace_access(vma, page, uaddr, dst, len);
}
void __sync_icache_dcache(pte_t pte, unsigned long addr)
void __sync_icache_dcache(pte_t pte)
{
struct page *page = pte_page(pte);

View File

@ -15,7 +15,7 @@
extern void powernv_set_nmmu_ptcr(unsigned long ptcr);
extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
unsigned long flags,
struct npu_context *(*cb)(struct npu_context *, void *),
void (*cb)(struct npu_context *, void *),
void *priv);
extern void pnv_npu2_destroy_context(struct npu_context *context,
struct pci_dev *gpdev);

View File

@ -441,7 +441,6 @@ static int mce_handle_ierror(struct pt_regs *regs,
if (pfn != ULONG_MAX) {
*phys_addr =
(pfn << PAGE_SHIFT);
handled = 1;
}
}
}
@ -532,9 +531,7 @@ static int mce_handle_derror(struct pt_regs *regs,
* kernel/exception-64s.h
*/
if (get_paca()->in_mce < MAX_MCE_DEPTH)
if (!mce_find_instr_ea_and_pfn(regs, addr,
phys_addr))
handled = 1;
mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
}
found = 1;
}
@ -572,7 +569,7 @@ static long mce_handle_error(struct pt_regs *regs,
const struct mce_ierror_table itable[])
{
struct mce_error_info mce_err = { 0 };
uint64_t addr, phys_addr;
uint64_t addr, phys_addr = ULONG_MAX;
uint64_t srr1 = regs->msr;
long handled;

View File

@ -566,10 +566,35 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
#endif
#ifdef CONFIG_NMI_IPI
static void stop_this_cpu(struct pt_regs *regs)
#else
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
/*
* This is a special case because it never returns, so the NMI IPI
* handling would never mark it as done, which makes any later
* smp_send_nmi_ipi() call spin forever. Mark it done now.
*
* IRQs are already hard disabled by the smp_handle_nmi_ipi.
*/
nmi_ipi_lock();
nmi_ipi_busy_count--;
nmi_ipi_unlock();
/* Remove this CPU */
set_cpu_online(smp_processor_id(), false);
spin_begin();
while (1)
spin_cpu_relax();
}
void smp_send_stop(void)
{
smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
}
#else /* CONFIG_NMI_IPI */
static void stop_this_cpu(void *dummy)
#endif
{
/* Remove this CPU */
set_cpu_online(smp_processor_id(), false);
@ -582,12 +607,22 @@ static void stop_this_cpu(void *dummy)
void smp_send_stop(void)
{
#ifdef CONFIG_NMI_IPI
smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, stop_this_cpu, 1000000);
#else
static bool stopped = false;
/*
* Prevent waiting on csd lock from a previous smp_send_stop.
* This is racy, but in general callers try to do the right
* thing and only fire off one smp_send_stop (e.g., see
* kernel/panic.c)
*/
if (stopped)
return;
stopped = true;
smp_call_function(stop_this_cpu, NULL, 0);
#endif
}
#endif /* CONFIG_NMI_IPI */
struct thread_info *current_set[NR_CPUS];

View File

@ -305,6 +305,13 @@ void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}
#ifdef CONFIG_ALTIVEC
void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
}
#endif
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);

View File

@ -133,6 +133,7 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *
start, start + size, rc);
return -EFAULT;
}
flush_inval_dcache_range(start, start + size);
return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
@ -159,6 +160,7 @@ int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap
/* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start);
flush_inval_dcache_range(start, start + size);
ret = remove_section_mapping(start, start + size);
/* Ensure all vmalloc mappings are flushed in case they also

View File

@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = {
.open = simple_open,
};
static void flush_memory_region(u64 base, u64 size)
{
unsigned long line_size = ppc64_caches.l1d.size;
u64 end = base + size;
u64 addr;
base = round_down(base, line_size);
end = round_up(end, line_size);
for (addr = base; addr < end; addr += line_size)
asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory");
}
static int check_memblock_online(struct memory_block *mem, void *arg)
{
if (mem->state != MEM_ONLINE)
@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
change_memblock_state);
/* RCU grace period? */
flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT),
nr_pages << PAGE_SHIFT);
lock_device_hotplug();
remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
unlock_device_hotplug();

View File

@ -33,6 +33,19 @@
#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
/*
* spinlock to protect initialisation of an npu_context for a particular
* mm_struct.
*/
static DEFINE_SPINLOCK(npu_context_lock);
/*
* When an address shootdown range exceeds this threshold we invalidate the
* entire TLB on the GPU for the given PID rather than each specific address in
* the range.
*/
#define ATSD_THRESHOLD (2*1024*1024)
/*
* Other types of TCE cache invalidation are not functional in the
* hardware.
@ -401,7 +414,7 @@ struct npu_context {
bool nmmu_flush;
/* Callback to stop translation requests on a given GPU */
struct npu_context *(*release_cb)(struct npu_context *, void *);
void (*release_cb)(struct npu_context *context, void *priv);
/*
* Private pointer passed to the above callback for usage by
@ -671,11 +684,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
struct npu_context *npu_context = mn_to_npu_context(mn);
unsigned long address;
for (address = start; address < end; address += PAGE_SIZE)
mmio_invalidate(npu_context, 1, address, false);
if (end - start > ATSD_THRESHOLD) {
/*
* Just invalidate the entire PID if the address range is too
* large.
*/
mmio_invalidate(npu_context, 0, 0, true);
} else {
for (address = start; address < end; address += PAGE_SIZE)
mmio_invalidate(npu_context, 1, address, false);
/* Do the flush only on the final address == end */
mmio_invalidate(npu_context, 1, address, true);
/* Do the flush only on the final address == end */
mmio_invalidate(npu_context, 1, address, true);
}
}
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@ -696,11 +717,12 @@ static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
* Returns an error if there no contexts are currently available or a
* npu_context which should be passed to pnv_npu2_handle_fault().
*
* mmap_sem must be held in write mode.
* mmap_sem must be held in write mode and must not be called from interrupt
* context.
*/
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
unsigned long flags,
struct npu_context *(*cb)(struct npu_context *, void *),
void (*cb)(struct npu_context *, void *),
void *priv)
{
int rc;
@ -743,7 +765,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
/*
* Setup the NPU context table for a particular GPU. These need to be
* per-GPU as we need the tables to filter ATSDs when there are no
* active contexts on a particular GPU.
* active contexts on a particular GPU. It is safe for these to be
* called concurrently with destroy as the OPAL call takes appropriate
* locks and refcounts on init/destroy.
*/
rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
PCI_DEVID(gpdev->bus->number, gpdev->devfn));
@ -754,8 +778,29 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
* We store the npu pci device so we can more easily get at the
* associated npus.
*/
spin_lock(&npu_context_lock);
npu_context = mm->context.npu_context;
if (npu_context) {
if (npu_context->release_cb != cb ||
npu_context->priv != priv) {
spin_unlock(&npu_context_lock);
opal_npu_destroy_context(nphb->opal_id, mm->context.id,
PCI_DEVID(gpdev->bus->number,
gpdev->devfn));
return ERR_PTR(-EINVAL);
}
WARN_ON(!kref_get_unless_zero(&npu_context->kref));
}
spin_unlock(&npu_context_lock);
if (!npu_context) {
/*
* We can set up these fields without holding the
* npu_context_lock as the npu_context hasn't been returned to
* the caller meaning it can't be destroyed. Parallel allocation
* is protected against by mmap_sem.
*/
rc = -ENOMEM;
npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
if (npu_context) {
@ -774,8 +819,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
}
mm->context.npu_context = npu_context;
} else {
WARN_ON(!kref_get_unless_zero(&npu_context->kref));
}
npu_context->release_cb = cb;
@ -814,15 +857,16 @@ static void pnv_npu2_release_context(struct kref *kref)
mm_context_remove_copro(npu_context->mm);
npu_context->mm->context.npu_context = NULL;
mmu_notifier_unregister(&npu_context->mn,
npu_context->mm);
kfree(npu_context);
}
/*
* Destroy a context on the given GPU. May free the npu_context if it is no
* longer active on any GPUs. Must not be called from interrupt context.
*/
void pnv_npu2_destroy_context(struct npu_context *npu_context,
struct pci_dev *gpdev)
{
int removed;
struct pnv_phb *nphb;
struct npu *npu;
struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
@ -844,7 +888,21 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
PCI_DEVID(gpdev->bus->number, gpdev->devfn));
kref_put(&npu_context->kref, pnv_npu2_release_context);
spin_lock(&npu_context_lock);
removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
spin_unlock(&npu_context_lock);
/*
* We need to do this outside of pnv_npu2_release_context so that it is
* outside the spinlock as mmu_notifier_destroy uses SRCU.
*/
if (removed) {
mmu_notifier_unregister(&npu_context->mn,
npu_context->mm);
kfree(npu_context);
}
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);

View File

@ -48,10 +48,12 @@ unsigned long __init opal_get_boot_time(void)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
if (rc == OPAL_BUSY_EVENT)
if (rc == OPAL_BUSY_EVENT) {
mdelay(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
else if (rc == OPAL_BUSY)
mdelay(10);
} else if (rc == OPAL_BUSY) {
mdelay(OPAL_BUSY_DELAY_MS);
}
}
if (rc != OPAL_SUCCESS)
return 0;

View File

@ -11,6 +11,7 @@ config RISCV
select ARCH_WANT_FRAME_POINTERS
select CLONE_BACKWARDS
select COMMON_CLK
select DMA_DIRECT_OPS
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES
select GENERIC_IRQ_SHOW
@ -89,9 +90,6 @@ config PGTABLE_LEVELS
config HAVE_KPROBES
def_bool n
config DMA_DIRECT_OPS
def_bool y
menu "Platform type"
choice

View File

@ -15,7 +15,6 @@ generic-y += fcntl.h
generic-y += futex.h
generic-y += hardirq.h
generic-y += hash.h
generic-y += handle_irq.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ioctls.h

View File

@ -52,7 +52,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
# Make sure only to export the intended __vdso_xxx symbol offsets.
quiet_cmd_vdsold = VDSOLD $@
cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \
cmd_vdsold = $(CC) $(KCFLAGS) $(call cc-option, -no-pie) -nostdlib $(SYSCFLAGS_$(@F)) \
-Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
$(CROSS_COMPILE)objcopy \
$(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@

View File

@ -45,6 +45,9 @@ struct thread_info {
void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
#endif
/*

View File

@ -465,11 +465,11 @@ int module_finalize(const Elf_Ehdr *hdr,
apply_alternatives(aseg, aseg + s->sh_size);
if (IS_ENABLED(CONFIG_EXPOLINE) &&
(!strcmp(".nospec_call_table", secname)))
(!strncmp(".s390_indirect", secname, 14)))
nospec_revert(aseg, aseg + s->sh_size);
if (IS_ENABLED(CONFIG_EXPOLINE) &&
(!strcmp(".nospec_return_table", secname)))
(!strncmp(".s390_return", secname, 12)))
nospec_revert(aseg, aseg + s->sh_size);
}

View File

@ -123,7 +123,7 @@ CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1);
CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1);
CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2);
CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3);
CPUMF_EVENT_ATTR(cf_z13, L1D_WRITES_RO_EXCL, 0x0080);
CPUMF_EVENT_ATTR(cf_z13, L1D_RO_EXCL_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082);
CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083);
@ -179,7 +179,7 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db);
CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc);
CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080);
CPUMF_EVENT_ATTR(cf_z14, L1D_RO_EXCL_WRITES, 0x0080);
CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081);
CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082);
CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083);
@ -371,7 +371,7 @@ static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = {
};
static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z13, L1D_WRITES_RO_EXCL),
CPUMF_EVENT_PTR(cf_z13, L1D_RO_EXCL_WRITES),
CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES),
CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES),
CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES),
@ -431,7 +431,7 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = {
};
static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL),
CPUMF_EVENT_PTR(cf_z14, L1D_RO_EXCL_WRITES),
CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES),
CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES),
CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES),

View File

@ -29,6 +29,7 @@
#include <linux/random.h>
#include <linux/export.h>
#include <linux/init_task.h>
#include <asm/cpu_mf.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
@ -48,6 +49,15 @@ void flush_thread(void)
{
}
void arch_setup_new_exec(void)
{
if (S390_lowcore.current_pid != current->pid) {
S390_lowcore.current_pid = current->pid;
if (test_facility(40))
lpp(&S390_lowcore.lpp);
}
}
void arch_release_task_struct(struct task_struct *tsk)
{
runtime_instr_release(tsk);

View File

@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
return orig;
}
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
struct pt_regs *regs)
{
if (ctx == RP_CHECK_CHAIN_CALL)
return user_stack_pointer(regs) <= ret->stack;
else
return user_stack_pointer(regs) < ret->stack;
}
/* Instruction Emulation */
static void adjust_psw_addr(psw_t *psw, unsigned long len)

View File

@ -52,6 +52,7 @@ config X86
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
select ARCH_HAS_FILTER_PGPROT
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV if X86_64
@ -273,6 +274,9 @@ config ARCH_HAS_CPU_RELAX
config ARCH_HAS_CACHE_LINE_SIZE
def_bool y
config ARCH_HAS_FILTER_PGPROT
def_bool y
config HAVE_SETUP_PER_CPU_AREA
def_bool y

View File

@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
pushq %rdx /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
pushq $0 /* pt_regs->r8 = 0 */
pushq %r8 /* pt_regs->r8 */
xorl %r8d, %r8d /* nospec r8 */
pushq $0 /* pt_regs->r9 = 0 */
pushq %r9 /* pt_regs->r9 */
xorl %r9d, %r9d /* nospec r9 */
pushq $0 /* pt_regs->r10 = 0 */
pushq %r10 /* pt_regs->r10 */
xorl %r10d, %r10d /* nospec r10 */
pushq $0 /* pt_regs->r11 = 0 */
pushq %r11 /* pt_regs->r11 */
xorl %r11d, %r11d /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
xorl %ebx, %ebx /* nospec rbx */

View File

@ -3339,7 +3339,8 @@ static void intel_pmu_cpu_starting(int cpu)
cpuc->lbr_sel = NULL;
flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
if (x86_pmu.version > 1)
flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
if (!cpuc->shared_regs)
return;
@ -3502,6 +3503,8 @@ static __initconst const struct x86_pmu core_pmu = {
.cpu_dying = intel_pmu_cpu_dying,
};
static struct attribute *intel_pmu_attrs[];
static __initconst const struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
@ -3533,6 +3536,8 @@ static __initconst const struct x86_pmu intel_pmu = {
.format_attrs = intel_arch3_formats_attr,
.events_sysfs_show = intel_event_sysfs_show,
.attrs = intel_pmu_attrs,
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
@ -3911,8 +3916,6 @@ __init int intel_pmu_init(void)
x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
x86_pmu.attrs = intel_pmu_attrs;
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events, when not running in a hypervisor:

View File

@ -320,6 +320,7 @@
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */

View File

@ -46,7 +46,21 @@ int ftrace_int3_handler(struct pt_regs *regs);
#endif /* CONFIG_FUNCTION_TRACER */
#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)
#ifndef __ASSEMBLY__
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
/*
* Compare the symbol name with the system call name. Skip the
* "__x64_sys", "__ia32_sys" or simple "sys" prefix.
*/
return !strcmp(sym + 3, name + 3) ||
(!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
(!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3));
}
#ifndef COMPILE_OFFSETS
#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
#include <asm/compat.h>
@ -67,6 +81,7 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
return false;
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */
#endif /* !COMPILE_OFFSETS */
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FTRACE_H */
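The hunk above teaches ftrace how to match syscall symbols that carry the new "__x64_sys" / "__ia32_sys" prefixes. As a rough userspace sketch of that comparison (the helper name and the sample symbols below are invented for illustration, not taken from the kernel):

/* Illustrative sketch, not part of the diff: the same prefix-skipping
 * comparison as arch_syscall_match_sym_name() above, compiled standalone.
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool match_sym_name(const char *sym, const char *name)
{
	/* Compare everything after the "sys" part of both strings, skipping
	 * the "__x64_sys", "__ia32_sys" or plain "sys" prefix of sym.
	 */
	return !strcmp(sym + 3, name + 3) ||
	       (!strncmp(sym, "__x64_", 6) && !strcmp(sym + 9, name + 3)) ||
	       (!strncmp(sym, "__ia32_", 7) && !strcmp(sym + 10, name + 3));
}

int main(void)
{
	printf("%d\n", match_sym_name("__x64_sys_read", "sys_read"));   /* 1 */
	printf("%d\n", match_sym_name("__ia32_sys_read", "sys_read"));  /* 1 */
	printf("%d\n", match_sym_name("sys_read", "sys_read"));         /* 1 */
	printf("%d\n", match_sym_name("__x64_sys_write", "sys_read"));  /* 0 */
	return 0;
}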

View File

@ -34,11 +34,6 @@
* (0x80 is the syscall vector, 0x30-0x3f are for ISA)
*/
#define FIRST_EXTERNAL_VECTOR 0x20
/*
* We start allocating at 0x21 to spread out vectors evenly between
* priority levels. (0x80 is the syscall vector)
*/
#define VECTOR_OFFSET_START 1
/*
* Reserve the lowest usable vector (and hence lowest priority) 0x20 for
@ -119,8 +114,6 @@
#define FIRST_SYSTEM_VECTOR NR_VECTORS
#endif
#define FPU_IRQ 13
/*
* Size the maximum number of interrupts.
*

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL2.0 */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Jailhouse paravirt detection

View File

@ -601,6 +601,11 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
#define canon_pgprot(p) __pgprot(massage_pgprot(p))
static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
{
return canon_pgprot(prot);
}
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
enum page_cache_mode pcm,
enum page_cache_mode new_pcm)

View File

@ -105,14 +105,14 @@ extern unsigned int ptrs_per_p4d;
#define LDT_PGD_ENTRY (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
#define __VMALLOC_BASE_L4 0xffffc90000000000
#define __VMALLOC_BASE_L5 0xffa0000000000000
#define __VMALLOC_BASE_L4 0xffffc90000000000UL
#define __VMALLOC_BASE_L5 0xffa0000000000000UL
#define VMALLOC_SIZE_TB_L4 32UL
#define VMALLOC_SIZE_TB_L5 12800UL
#define __VMEMMAP_BASE_L4 0xffffea0000000000
#define __VMEMMAP_BASE_L5 0xffd4000000000000
#define __VMEMMAP_BASE_L4 0xffffea0000000000UL
#define __VMEMMAP_BASE_L5 0xffd4000000000000UL
#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
# define VMALLOC_START vmalloc_base

View File

@ -1 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_X64_MSGBUF_H
#define __ASM_X64_MSGBUF_H
#if !defined(__x86_64__) || !defined(__ILP32__)
#include <asm-generic/msgbuf.h>
#else
/*
* The msqid64_ds structure for x86 architecture with x32 ABI.
*
* On x86-32 and x86-64 we can just use the generic definition, but
* x32 uses the same binary layout as x86_64, which is different
* from other 32-bit architectures.
*/
struct msqid64_ds {
struct ipc64_perm msg_perm;
__kernel_time_t msg_stime; /* last msgsnd time */
__kernel_time_t msg_rtime; /* last msgrcv time */
__kernel_time_t msg_ctime; /* last change time */
__kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
__kernel_ulong_t msg_qnum; /* number of messages in queue */
__kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
__kernel_pid_t msg_lspid; /* pid of last msgsnd */
__kernel_pid_t msg_lrpid; /* last receive pid */
__kernel_ulong_t __unused4;
__kernel_ulong_t __unused5;
};
#endif
#endif /* __ASM_X64_MSGBUF_H */

View File

@ -1 +1,43 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_X86_SHMBUF_H
#define __ASM_X86_SHMBUF_H
#if !defined(__x86_64__) || !defined(__ILP32__)
#include <asm-generic/shmbuf.h>
#else
/*
* The shmid64_ds structure for x86 architecture with x32 ABI.
*
* On x86-32 and x86-64 we can just use the generic definition, but
* x32 uses the same binary layout as x86_64, which is different
* from other 32-bit architectures.
*/
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
__kernel_time_t shm_atime; /* last attach time */
__kernel_time_t shm_dtime; /* last detach time */
__kernel_time_t shm_ctime; /* last change time */
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
__kernel_ulong_t shm_nattch; /* no. of current attaches */
__kernel_ulong_t __unused4;
__kernel_ulong_t __unused5;
};
struct shminfo64 {
__kernel_ulong_t shmmax;
__kernel_ulong_t shmmin;
__kernel_ulong_t shmmni;
__kernel_ulong_t shmseg;
__kernel_ulong_t shmall;
__kernel_ulong_t __unused1;
__kernel_ulong_t __unused2;
__kernel_ulong_t __unused3;
__kernel_ulong_t __unused4;
};
#endif
#endif /* __ASM_X86_SHMBUF_H */

View File

@ -835,6 +835,9 @@ static const struct _tlb_table intel_tlb_table[] = {
{ 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
{ 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
{ 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
{ 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
{ 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
{ 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
{ 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
{ 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
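The three new rows describe Intel TLB descriptors 0x6b, 0x6c and 0x6d. A minimal sketch of how such a descriptor table is consumed, using made-up struct and function names and only the three entries from this hunk:

/* Illustrative sketch, not part of the diff: a descriptor lookup in the
 * style of intel_tlb_table. Entry values are the ones added above.
 */
#include <stdio.h>

struct tlb_entry {
	unsigned char descriptor;
	unsigned int entries;
	const char *info;
};

static const struct tlb_entry demo_tlb_table[] = {
	{ 0x6b, 256, "TLB_DATA 4 KByte pages, 8-way associative" },
	{ 0x6c, 128, "TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
	{ 0x6d, 16,  "TLB_DATA 1 GByte pages, fully associative" },
	{ 0x00, 0, NULL } /* end marker, as in the kernel table */
};

static const struct tlb_entry *tlb_lookup(unsigned char desc)
{
	const struct tlb_entry *e;

	for (e = demo_tlb_table; e->info; e++)
		if (e->descriptor == desc)
			return e;
	return NULL;
}

int main(void)
{
	const struct tlb_entry *e = tlb_lookup(0x6d);

	if (e)
		printf("0x%02x: %u entries, %s\n", e->descriptor, e->entries, e->info);
	return 0;
}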

View File

@ -564,14 +564,12 @@ static int __reload_late(void *info)
apply_microcode_local(&err);
spin_unlock(&update_lock);
/* siblings return UCODE_OK because their engine got updated already */
if (err > UCODE_NFOUND) {
pr_warn("Error reloading microcode on CPU %d\n", cpu);
return -1;
/* siblings return UCODE_OK because their engine got updated already */
ret = -1;
} else if (err == UCODE_UPDATED || err == UCODE_OK) {
ret = 1;
} else {
return ret;
}
/*

View File

@ -485,7 +485,6 @@ static void show_saved_mc(void)
*/
static void save_mc_for_early(u8 *mc, unsigned int size)
{
#ifdef CONFIG_HOTPLUG_CPU
/* Synchronization during CPU hotplug. */
static DEFINE_MUTEX(x86_cpu_microcode_mutex);
@ -495,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size)
show_saved_mc();
mutex_unlock(&x86_cpu_microcode_mutex);
#endif
}
static bool load_builtin_intel_microcode(struct cpio_data *cp)

View File

@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL2.0
// SPDX-License-Identifier: GPL-2.0
/*
* Jailhouse paravirt_ops implementation
*

View File

@ -50,6 +50,7 @@
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
#include <linux/dma-contiguous.h>
#include <xen/xen.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@ -534,6 +535,11 @@ static void __init reserve_crashkernel(void)
high = true;
}
if (xen_pv_domain()) {
pr_info("Ignoring crashkernel for a Xen PV domain\n");
return;
}
/* 0 means: find the address automatically */
if (crash_base <= 0) {
/*

View File

@ -1571,6 +1571,8 @@ static inline void mwait_play_dead(void)
void *mwait_ptr;
int i;
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
return;
if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
if (!this_cpu_has(X86_FEATURE_CLFLUSH))

View File

@ -4544,12 +4544,6 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa);
}
static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu)
{
if (enable_ept)
vmx_flush_tlb(vcpu, true);
}
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
@ -9278,7 +9272,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
} else {
sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
vmx_flush_tlb_ept_only(vcpu);
vmx_flush_tlb(vcpu, true);
}
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
@ -9306,7 +9300,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
!nested_cpu_has2(get_vmcs12(&vmx->vcpu),
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
vmcs_write64(APIC_ACCESS_ADDR, hpa);
vmx_flush_tlb_ept_only(vcpu);
vmx_flush_tlb(vcpu, true);
}
}
@ -11220,7 +11214,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
}
} else if (nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
vmx_flush_tlb_ept_only(vcpu);
vmx_flush_tlb(vcpu, true);
}
/*
@ -12073,7 +12067,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
} else if (!nested_cpu_has_ept(vmcs12) &&
nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
vmx_flush_tlb_ept_only(vcpu);
vmx_flush_tlb(vcpu, true);
}
/* This is needed for same reason as it was needed in prepare_vmcs02 */

View File

@ -302,13 +302,6 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
__rem; \
})
#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
#define KVM_X86_DISABLE_EXITS_HTL (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_VALID_EXITS (KVM_X86_DISABLE_EXITS_MWAIT | \
KVM_X86_DISABLE_EXITS_HTL | \
KVM_X86_DISABLE_EXITS_PAUSE)
static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
return kvm->arch.mwait_in_guest;

View File

@ -93,6 +93,18 @@ void arch_report_meminfo(struct seq_file *m)
static inline void split_page_count(int level) { }
#endif
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
return addr >= start && addr < end;
}
static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
return addr >= start && addr <= end;
}
#ifdef CONFIG_X86_64
static inline unsigned long highmap_start_pfn(void)
@ -106,20 +118,25 @@ static inline unsigned long highmap_end_pfn(void)
return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
}
static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
/*
* Kernel text has an alias mapping at a high address, known
* here as "highmap".
*/
return within_inclusive(pfn, highmap_start_pfn(), highmap_end_pfn());
}
#else
static bool __cpa_pfn_in_highmap(unsigned long pfn)
{
/* There is no highmap on 32-bit */
return false;
}
#endif
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
return addr >= start && addr < end;
}
static inline int
within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
return addr >= start && addr <= end;
}
/*
* Flushing functions
*/
@ -172,7 +189,7 @@ static void __cpa_flush_all(void *arg)
static void cpa_flush_all(unsigned long cache)
{
BUG_ON(irqs_disabled());
BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}
@ -236,7 +253,7 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache,
unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
#endif
BUG_ON(irqs_disabled());
BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);
@ -1183,6 +1200,10 @@ static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
cpa->numpages = 1;
cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
return 0;
} else if (__cpa_pfn_in_highmap(cpa->pfn)) {
/* Faults in the highmap are OK, so do not warn: */
return -EFAULT;
} else {
WARN(1, KERN_WARNING "CPA: called for zero pte. "
"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
@ -1335,8 +1356,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
* to touch the high mapped kernel as well:
*/
if (!within(vaddr, (unsigned long)_text, _brk_end) &&
within_inclusive(cpa->pfn, highmap_start_pfn(),
highmap_end_pfn())) {
__cpa_pfn_in_highmap(cpa->pfn)) {
unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
__START_KERNEL_map - phys_base;
alias_cpa = *cpa;
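For reference, the two range helpers moved to the top of the file differ only in how they treat the upper bound; the highmap PFN test above relies on the inclusive form. A standalone sketch (nothing here beyond what the hunk already shows):

/* Illustrative sketch, not part of the diff: within() is a half-open
 * [start, end) check, within_inclusive() is a closed [start, end] check.
 */
#include <stdio.h>

static inline int within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

static inline int within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr <= end;
}

int main(void)
{
	printf("%d %d\n", within(10, 0, 10), within_inclusive(10, 0, 10)); /* 0 1 */
	return 0;
}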

View File

@ -421,6 +421,16 @@ static inline bool pti_kernel_image_global_ok(void)
if (boot_cpu_has(X86_FEATURE_K8))
return false;
/*
* RANDSTRUCT derives its hardening benefits from the
* attacker's lack of knowledge about the layout of kernel
* data structures. Keep the kernel image non-global in
* cases where RANDSTRUCT is in use to help keep the layout a
* secret.
*/
if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
return false;
return true;
}
@ -430,12 +440,24 @@ static inline bool pti_kernel_image_global_ok(void)
*/
void pti_clone_kernel_text(void)
{
/*
* rodata is part of the kernel image and is normally
* readable on the filesystem or on the web. But do not
* clone the areas past rodata; they might contain secrets.
*/
unsigned long start = PFN_ALIGN(_text);
unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);
unsigned long end = (unsigned long)__end_rodata_hpage_align;
if (!pti_kernel_image_global_ok())
return;
pr_debug("mapping partial kernel image into user address space\n");
/*
* Note that this will undo _some_ of the work that
* pti_set_kernel_image_nonglobal() did to clear the
* global bit.
*/
pti_clone_pmds(start, end, _PAGE_RW);
}
@ -458,8 +480,6 @@ void pti_set_kernel_image_nonglobal(void)
if (pti_kernel_image_global_ok())
return;
pr_debug("set kernel image non-global\n");
set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}

View File

@ -4934,8 +4934,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
bool new_queue = false;
bool bfqq_already_existing = false, split = false;
if (!rq->elv.icq)
/*
* Even if we don't have an icq attached, we should still clear
* the scheduler pointers, as they might point to previously
* allocated bic/bfqq structs.
*/
if (!rq->elv.icq) {
rq->elv.priv[0] = rq->elv.priv[1] = NULL;
return;
}
bic = icq_to_bic(rq->elv.icq);
spin_lock_irq(&bfqd->lock);

View File

@ -1177,26 +1177,20 @@ int blkcg_init_queue(struct request_queue *q)
preloaded = !radix_tree_preload(GFP_KERNEL);
/*
* Make sure the root blkg exists and count the existing blkgs. As
* @q is bypassing at this point, blkg_lookup_create() can't be
* used. Open code insertion.
*/
/* Make sure the root blkg exists. */
rcu_read_lock();
spin_lock_irq(q->queue_lock);
blkg = blkg_create(&blkcg_root, q, new_blkg);
if (IS_ERR(blkg))
goto err_unlock;
q->root_blkg = blkg;
q->root_rl.blkg = blkg;
spin_unlock_irq(q->queue_lock);
rcu_read_unlock();
if (preloaded)
radix_tree_preload_end();
if (IS_ERR(blkg))
return PTR_ERR(blkg);
q->root_blkg = blkg;
q->root_rl.blkg = blkg;
ret = blk_throtl_init(q);
if (ret) {
spin_lock_irq(q->queue_lock);
@ -1204,6 +1198,13 @@ int blkcg_init_queue(struct request_queue *q)
spin_unlock_irq(q->queue_lock);
}
return ret;
err_unlock:
spin_unlock_irq(q->queue_lock);
rcu_read_unlock();
if (preloaded)
radix_tree_preload_end();
return PTR_ERR(blkg);
}
/**
@ -1410,9 +1411,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
__clear_bit(pol->plid, q->blkcg_pols);
list_for_each_entry(blkg, &q->blkg_list, q_node) {
/* grab blkcg lock too while removing @pd from @blkg */
spin_lock(&blkg->blkcg->lock);
if (blkg->pd[pol->plid]) {
if (!blkg->pd[pol->plid]->offline &&
pol->pd_offline_fn) {
@ -1422,8 +1420,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
pol->pd_free_fn(blkg->pd[pol->plid]);
blkg->pd[pol->plid] = NULL;
}
spin_unlock(&blkg->blkcg->lock);
}
spin_unlock_irq(q->queue_lock);

View File

@ -201,6 +201,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->part = NULL;
seqcount_init(&rq->gstate_seq);
u64_stats_init(&rq->aborted_gstate_sync);
/*
* See comment of blk_mq_init_request
*/
WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
}
EXPORT_SYMBOL(blk_rq_init);
@ -915,7 +919,6 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
while (true) {
bool success = false;
int ret;
rcu_read_lock();
if (percpu_ref_tryget_live(&q->q_usage_counter)) {
@ -947,14 +950,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
*/
smp_rmb();
ret = wait_event_interruptible(q->mq_freeze_wq,
(atomic_read(&q->mq_freeze_depth) == 0 &&
(preempt || !blk_queue_preempt_only(q))) ||
blk_queue_dying(q));
wait_event(q->mq_freeze_wq,
(atomic_read(&q->mq_freeze_depth) == 0 &&
(preempt || !blk_queue_preempt_only(q))) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
if (ret)
return ret;
}
}

View File

@ -2042,6 +2042,13 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
seqcount_init(&rq->gstate_seq);
u64_stats_init(&rq->aborted_gstate_sync);
/*
* start gstate with gen 1 instead of 0, otherwise it will be equal
* to aborted_gstate, and be identified as timed out by
* blk_mq_terminate_expired.
*/
WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
return 0;
}
@ -2329,7 +2336,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
static void blk_mq_map_swqueue(struct request_queue *q)
{
unsigned int i;
unsigned int i, hctx_idx;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
@ -2346,8 +2353,23 @@ static void blk_mq_map_swqueue(struct request_queue *q)
/*
* Map software to hardware queues.
*
* If the cpu isn't present, the cpu is mapped to first hctx.
*/
for_each_possible_cpu(i) {
hctx_idx = q->mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] &&
!__blk_mq_alloc_rq_map(set, hctx_idx)) {
/*
* If tags initialization fails for some hctx,
* that hctx won't be brought online. In this
* case, remap the current ctx to hctx[0] which
* is guaranteed to always have tags allocated
*/
q->mq_map[i] = 0;
}
ctx = per_cpu_ptr(q->queue_ctx, i);
hctx = blk_mq_map_queue(q, i);
@ -2359,8 +2381,21 @@ static void blk_mq_map_swqueue(struct request_queue *q)
mutex_unlock(&q->sysfs_lock);
queue_for_each_hw_ctx(q, hctx, i) {
/* every hctx should get mapped by at least one CPU */
WARN_ON(!hctx->nr_ctx);
/*
* If no software queues are mapped to this hardware queue,
* disable it and free the request entries.
*/
if (!hctx->nr_ctx) {
/* Never unmap queue 0. We need it as a
* fallback in case a new remap fails
* allocation
*/
if (i && set->tags[i])
blk_mq_free_map_and_requests(set, i);
hctx->tags = NULL;
continue;
}
hctx->tags = set->tags[i];
WARN_ON(!hctx->tags);
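The remap logic above keeps every possible CPU pointed at a hardware queue with allocated tags, falling back to queue 0 when allocation fails. A toy model of that fallback, with invented names and sizes, might look like:

/* Illustrative sketch, not part of the diff: CPUs whose target hardware
 * queue has no tags are remapped to queue 0, which is never torn down.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4
#define NR_HWQ  2

static bool hwq_has_tags[NR_HWQ] = { true, false }; /* queue 1 failed allocation */
static int  cpu_to_hwq[NR_CPUS]  = { 0, 1, 0, 1 };

static void remap_cpus(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		/* Queue 0 always has tags, so it is a safe fallback. */
		if (!hwq_has_tags[cpu_to_hwq[cpu]])
			cpu_to_hwq[cpu] = 0;
	}
}

int main(void)
{
	int cpu;

	remap_cpus();
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d -> hwq%d\n", cpu, cpu_to_hwq[cpu]);
	return 0;
}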

View File

@ -7,6 +7,9 @@
struct blk_mq_tag_set;
/**
* struct blk_mq_ctx - State for a software queue facing the submitting CPUs
*/
struct blk_mq_ctx {
struct {
spinlock_t lock;

View File

@ -204,9 +204,14 @@ static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
down_read(&crypto_alg_sem);
alg = __crypto_alg_lookup(name, type | test, mask | test);
if (!alg && test)
alg = __crypto_alg_lookup(name, type, mask) ?
ERR_PTR(-ELIBBAD) : NULL;
if (!alg && test) {
alg = __crypto_alg_lookup(name, type, mask);
if (alg && !crypto_is_larval(alg)) {
/* Test failed */
crypto_mod_put(alg);
alg = ERR_PTR(-ELIBBAD);
}
}
up_read(&crypto_alg_sem);
return alg;

View File

@ -1134,8 +1134,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
if (!drbg)
return;
kzfree(drbg->Vbuf);
drbg->Vbuf = NULL;
drbg->V = NULL;
kzfree(drbg->Cbuf);
drbg->Cbuf = NULL;
drbg->C = NULL;
kzfree(drbg->scratchpadbuf);
drbg->scratchpadbuf = NULL;
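The fix clears each buffer pointer after kzfree() so a second deallocation of the same state is harmless. A userspace analogue of that pattern (kzfree() is kernel-only; this sketch assumes a libc that provides explicit_bzero(), e.g. glibc 2.25+, and the struct and helper names are made up):

/* Illustrative sketch, not part of the diff: wipe, free and NULL a buffer
 * so repeated dealloc calls become no-ops.
 */
#define _DEFAULT_SOURCE
#include <stdlib.h>
#include <string.h>

struct demo_state {
	unsigned char *vbuf;
	size_t vlen;
};

static void demo_dealloc(struct demo_state *s)
{
	if (!s)
		return;
	if (s->vbuf) {
		explicit_bzero(s->vbuf, s->vlen); /* wipe secret material */
		free(s->vbuf);
		s->vbuf = NULL;                   /* make re-dealloc a no-op */
	}
	s->vlen = 0;
}

int main(void)
{
	struct demo_state s = { .vbuf = malloc(32), .vlen = 32 };

	demo_dealloc(&s);
	demo_dealloc(&s); /* safe: the pointer was cleared the first time */
	return 0;
}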

View File

@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void)
return opregion;
}
static bool dmi_is_desktop(void)
{
const char *chassis_type;
chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
if (!chassis_type)
return false;
if (!strcmp(chassis_type, "3") || /* 3: Desktop */
!strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
!strcmp(chassis_type, "5") || /* 5: Pizza Box */
!strcmp(chassis_type, "6") || /* 6: Mini Tower */
!strcmp(chassis_type, "7") || /* 7: Tower */
!strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
return true;
return false;
}
int acpi_video_register(void)
{
int ret = 0;
@ -2143,8 +2162,12 @@ int acpi_video_register(void)
* win8 ready (where we also prefer the native backlight driver, so
* normally the acpi_video code should not register there anyways).
*/
if (only_lcd == -1)
only_lcd = acpi_osi_is_win8();
if (only_lcd == -1) {
if (dmi_is_desktop() && acpi_osi_is_win8())
only_lcd = true;
else
only_lcd = false;
}
dmi_check_system(video_dmi_table);
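dmi_is_desktop() above keys off the DMI chassis-type codes 3, 4, 5, 6, 7 and 11. The same test can be sketched from userspace via sysfs (the helper name is made up; /sys/class/dmi/id/chassis_type is the usual userspace view of this field):

/* Illustrative sketch, not part of the diff: apply the chassis-type check
 * from dmi_is_desktop() to the value exported through sysfs.
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool chassis_is_desktop(const char *type)
{
	static const char *desktop_types[] = { "3", "4", "5", "6", "7", "11" };
	size_t i;

	for (i = 0; i < sizeof(desktop_types) / sizeof(desktop_types[0]); i++)
		if (!strcmp(type, desktop_types[i]))
			return true;
	return false;
}

int main(void)
{
	char buf[16] = "";
	FILE *f = fopen("/sys/class/dmi/id/chassis_type", "r");

	if (!f || !fgets(buf, sizeof(buf), f)) {
		if (f)
			fclose(f);
		return 1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	printf("chassis_type=%s desktop=%d\n", buf, chassis_is_desktop(buf));
	return 0;
}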

View File

@ -12,23 +12,64 @@
#define pr_fmt(fmt) "ACPI: watchdog: " fmt
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include "internal.h"
static const struct dmi_system_id acpi_watchdog_skip[] = {
{
/*
* On Lenovo Z50-70 there are two issues with the WDAT
* table. First some of the instructions use RTC SRAM
* to store persistent information. This does not work well
* with the Linux RTC driver. Second, and more importantly,
* the instructions do not actually reset the system.
*
* On this particular system iTCO_wdt seems to work just
* fine so we prefer that over WDAT for now.
*
* See also https://bugzilla.kernel.org/show_bug.cgi?id=199033.
*/
.ident = "Lenovo Z50-70",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "20354"),
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Z50-70"),
},
},
{}
};
static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
{
const struct acpi_table_wdat *wdat = NULL;
acpi_status status;
if (acpi_disabled)
return NULL;
if (dmi_check_system(acpi_watchdog_skip))
return NULL;
status = acpi_get_table(ACPI_SIG_WDAT, 0,
(struct acpi_table_header **)&wdat);
if (ACPI_FAILURE(status)) {
/* It is fine if there is no WDAT */
return NULL;
}
return wdat;
}
/**
* Returns true if this system should prefer ACPI based watchdog instead of
* the native one (which are typically the same hardware).
*/
bool acpi_has_watchdog(void)
{
struct acpi_table_header hdr;
if (acpi_disabled)
return false;
return ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_WDAT, 0, &hdr));
return !!acpi_watchdog_get_wdat();
}
EXPORT_SYMBOL_GPL(acpi_has_watchdog);
@ -41,12 +82,10 @@ void __init acpi_watchdog_init(void)
struct platform_device *pdev;
struct resource *resources;
size_t nresources = 0;
acpi_status status;
int i;
status = acpi_get_table(ACPI_SIG_WDAT, 0,
(struct acpi_table_header **)&wdat);
if (ACPI_FAILURE(status)) {
wdat = acpi_watchdog_get_wdat();
if (!wdat) {
/* It is fine if there is no WDAT */
return;
}

View File

@ -635,4 +635,26 @@ module_param_call(lid_init_state,
NULL, 0644);
MODULE_PARM_DESC(lid_init_state, "Behavior for reporting LID initial state");
module_acpi_driver(acpi_button_driver);
static int acpi_button_register_driver(struct acpi_driver *driver)
{
/*
* Modules such as nouveau.ko and i915.ko have a link time dependency
* on acpi_lid_open(), and would therefore not be loadable on ACPI
* capable kernels booted in non-ACPI mode if the return value of
* acpi_bus_register_driver() is returned from here with ACPI disabled
* when this driver is built as a module.
*/
if (acpi_disabled)
return 0;
return acpi_bus_register_driver(driver);
}
static void acpi_button_unregister_driver(struct acpi_driver *driver)
{
if (!acpi_disabled)
acpi_bus_unregister_driver(driver);
}
module_driver(acpi_button_driver, acpi_button_register_driver,
acpi_button_unregister_driver);

View File

@ -2166,10 +2166,10 @@ int __init acpi_scan_init(void)
acpi_cmos_rtc_init();
acpi_container_init();
acpi_memory_hotplug_init();
acpi_watchdog_init();
acpi_pnp_init();
acpi_int340x_thermal_init();
acpi_amba_init();
acpi_watchdog_init();
acpi_init_lpit();
acpi_scan_add_handler(&generic_device_handler);

Some files were not shown because too many files have changed in this diff.