Merge branch 'ras/urgent' into ras/core

Pick up urgent fix as pending patch depends on it.
Thomas Gleixner 2018-05-19 15:20:49 +02:00
commit 95b5c0a592
407 changed files with 4002 additions and 1961 deletions

View File

@ -244,3 +244,11 @@ Description: read only
Returns 1 if the psl timebase register is synchronized
with the core timebase register, 0 otherwise.
Users: https://github.com/ibm-capi/libcxl
What: /sys/class/cxl/<card>/tunneled_ops_supported
Date: May 2018
Contact: linuxppc-dev@lists.ozlabs.org
Description: read only
Returns 1 if tunneled operations are supported in capi mode,
0 otherwise.
Users: https://github.com/ibm-capi/libcxl
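For illustration only, a minimal user-space sketch of probing the new attribute; the card name "card0", the helper name and the error handling are assumptions, not part of the ABI text above:

#include <stdio.h>

/* Returns 1 if tunneled ops are supported, 0 if not, -1 if the attribute
 * is missing (e.g. on older kernels) or unreadable. "card0" is an assumed
 * example card name. */
static int cxl_tunneled_ops_supported(const char *card)
{
	char path[256];
	int val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/cxl/%s/tunneled_ops_supported", card);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

/* usage sketch: cxl_tunneled_ops_supported("card0") */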

View File

@ -145,7 +145,7 @@ feature enabled.]
In this mode ``intel_pstate`` registers utilization update callbacks with the
CPU scheduler in order to run a P-state selection algorithm, either
``powersave`` or ``performance``, depending on the ``scaling_cur_freq`` policy
``powersave`` or ``performance``, depending on the ``scaling_governor`` policy
setting in ``sysfs``. The current CPU frequency information to be made
available from the ``scaling_cur_freq`` policy attribute in ``sysfs`` is
periodically updated by those utilization update callbacks too.

View File

@ -15,7 +15,7 @@ Sleep States That Can Be Supported
==================================
Depending on its configuration and the capabilities of the platform it runs on,
the Linux kernel can support up to four system sleep states, includig
the Linux kernel can support up to four system sleep states, including
hibernation and up to three variants of system suspend. The sleep states that
can be supported by the kernel are listed below.

View File

@ -264,7 +264,10 @@ i) Constructor
data device, but just remove the mapping.
read_only: Don't allow any changes to be made to the pool
metadata.
metadata. This mode is only available after the
thin-pool has been created and first used in full
read/write mode. It cannot be specified on initial
thin-pool creation.
error_if_no_space: Error IOs, instead of queueing, if no space.

View File

@ -30,7 +30,6 @@ compatible:
Optional properties:
- dma-coherent : Present if dma operations are coherent
- clocks : a list of phandle + clock specifier pairs
- resets : a list of phandle + reset specifier pairs
- target-supply : regulator for SATA target power
- phys : reference to the SATA PHY node
- phy-names : must be "sata-phy"

View File

@ -38,7 +38,7 @@ Display Timings
require specific display timings. The panel-timing subnode expresses those
timings as specified in the timing subnode section of the display timing
bindings defined in
Documentation/devicetree/bindings/display/display-timing.txt.
Documentation/devicetree/bindings/display/panel/display-timing.txt.
Connectivity

View File

@ -26,6 +26,7 @@ Required Properties:
- "renesas,dmac-r8a7794" (R-Car E2)
- "renesas,dmac-r8a7795" (R-Car H3)
- "renesas,dmac-r8a7796" (R-Car M3-W)
- "renesas,dmac-r8a77965" (R-Car M3-N)
- "renesas,dmac-r8a77970" (R-Car V3M)
- "renesas,dmac-r8a77980" (R-Car V3H)

View File

@ -5,7 +5,9 @@ Required properties:
- compatible: Must contain one or more of the following:
- "renesas,rcar-gen3-canfd" for R-Car Gen3 compatible controller.
- "renesas,r8a7795-canfd" for R8A7795 (R-Car H3) compatible controller.
- "renesas,r8a7796-canfd" for R8A7796 (R-Car M3) compatible controller.
- "renesas,r8a7796-canfd" for R8A7796 (R-Car M3-W) compatible controller.
- "renesas,r8a77970-canfd" for R8A77970 (R-Car V3M) compatible controller.
- "renesas,r8a77980-canfd" for R8A77980 (R-Car V3H) compatible controller.
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first, followed by the

View File

@ -18,6 +18,7 @@ Required properties:
- "renesas,etheravb-r8a7795" for the R8A7795 SoC.
- "renesas,etheravb-r8a7796" for the R8A7796 SoC.
- "renesas,etheravb-r8a77965" for the R8A77965 SoC.
- "renesas,etheravb-r8a77970" for the R8A77970 SoC.
- "renesas,etheravb-r8a77980" for the R8A77980 SoC.
- "renesas,etheravb-r8a77995" for the R8A77995 SoC.

View File

@ -56,9 +56,9 @@ pins it needs, and how they should be configured, with regard to muxer
configuration, drive strength and pullups. If one of these options is
not set, its actual value will be unspecified.
This driver supports the generic pin multiplexing and configuration
bindings. For details on each properties, you can refer to
./pinctrl-bindings.txt.
Allwinner A1X Pin Controller supports the generic pin multiplexing and
configuration bindings. For details on each properties, you can refer to
./pinctrl-bindings.txt.
Required sub-node properties:
- pins

View File

@ -43,6 +43,8 @@ Required properties:
- "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
- "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
- "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
- "renesas,scif-r8a77965" for R8A77965 (R-Car M3-N) SCIF compatible UART.
- "renesas,hscif-r8a77965" for R8A77965 (R-Car M3-N) HSCIF compatible UART.
- "renesas,scif-r8a77970" for R8A77970 (R-Car V3M) SCIF compatible UART.
- "renesas,hscif-r8a77970" for R8A77970 (R-Car V3M) HSCIF compatible UART.
- "renesas,scif-r8a77980" for R8A77980 (R-Car V3H) SCIF compatible UART.

View File

@ -182,6 +182,7 @@ karo Ka-Ro electronics GmbH
keithkoep Keith & Koep GmbH
keymile Keymile GmbH
khadas Khadas
kiebackpeter Kieback & Peter GmbH
kinetic Kinetic Technologies
kingnovel Kingnovel Technology Co., Ltd.
kosagi Sutajio Ko-Usagi PTE Ltd.

View File

@ -98,6 +98,14 @@ Finally, if you need to remove all overlays in one-go, just call
of_overlay_remove_all() which will remove every single one in the correct
order.
In addition, there is the option to register notifiers that get called on
overlay operations. See of_overlay_notifier_register/unregister and
enum of_overlay_notify_action for details.
Note that a notifier callback is not supposed to store pointers to a device
tree node or its content beyond OF_OVERLAY_POST_REMOVE corresponding to the
respective node it received.
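A minimal sketch of the notifier interface referred to above, assuming the 4.17-era API in include/linux/of.h (of_overlay_notifier_register() and struct of_overlay_notify_data); the callback and notifier_block names are illustrative only:

#include <linux/of.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Example callback: it only inspects the action and does not keep any
 * pointer to the overlay or target nodes beyond OF_OVERLAY_POST_REMOVE,
 * per the rule stated above. */
static int example_overlay_notify(struct notifier_block *nb,
				  unsigned long action, void *arg)
{
	struct of_overlay_notify_data *nd = arg;

	switch (action) {
	case OF_OVERLAY_PRE_APPLY:
		pr_info("overlay about to be applied to %pOF\n", nd->target);
		break;
	case OF_OVERLAY_POST_REMOVE:
		/* last point at which nd->overlay / nd->target may be used */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_overlay_nb = {
	.notifier_call = example_overlay_notify,
};

/* in driver init/exit:
 *	of_overlay_notifier_register(&example_overlay_nb);
 *	of_overlay_notifier_unregister(&example_overlay_nb);
 */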
Overlay DTS Format
------------------

View File

@ -72,8 +72,8 @@ KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
flag || value || meaning
==================================================================================
KVM_HINTS_DEDICATED || 0 || guest checks this feature bit to
|| || determine if there is vCPU pinning
|| || and there is no vCPU over-commitment,
KVM_HINTS_REALTIME || 0 || guest checks this feature bit to
|| || determine that vCPUs are never
|| || preempted for an unlimited time,
|| || allowing optimizations
----------------------------------------------------------------------------------
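For context, a hedged guest-side sketch of how this hint is typically consumed, mirroring the kvm_para_has_hint(KVM_HINTS_REALTIME) checks in arch/x86/kernel/kvm.c further down in this diff; the helper name is illustrative, not kernel code:

#include <linux/kvm_para.h>

/* Illustrative only: act on the host's promise that vCPUs are pinned
 * and never over-committed or preempted for an unbounded time. */
static bool example_guest_is_realtime(void)
{
	if (!kvm_para_available())
		return false;

	return kvm_para_has_hint(KVM_HINTS_REALTIME);
}

/* e.g. skip paravirt spinlocks / PV TLB flush when this returns true,
 * as kvm_guest_init() and kvm_spinlock_init() do in this merge. */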

View File

@ -137,9 +137,9 @@ Maintainers List (try to look for most precise areas first)
-----------------------------------
3C59X NETWORK DRIVER
M: Steffen Klassert <klassert@mathematik.tu-chemnitz.de>
M: Steffen Klassert <klassert@kernel.org>
L: netdev@vger.kernel.org
S: Maintained
S: Odd Fixes
F: Documentation/networking/vortex.txt
F: drivers/net/ethernet/3com/3c59x.c
@ -3691,7 +3691,6 @@ F: drivers/cpufreq/arm_big_little_dt.c
CPU POWER MONITORING SUBSYSTEM
M: Thomas Renninger <trenn@suse.com>
M: Shuah Khan <shuahkh@osg.samsung.com>
M: Shuah Khan <shuah@kernel.org>
L: linux-pm@vger.kernel.org
S: Maintained
@ -7696,10 +7695,10 @@ F: include/linux/sunrpc/
F: include/uapi/linux/sunrpc/
KERNEL SELFTEST FRAMEWORK
M: Shuah Khan <shuahkh@osg.samsung.com>
M: Shuah Khan <shuah@kernel.org>
L: linux-kselftest@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shuah/linux-kselftest.git
Q: https://patchwork.kernel.org/project/linux-kselftest/list/
S: Maintained
F: tools/testing/selftests/
F: Documentation/dev-tools/kselftest*
@ -9873,7 +9872,7 @@ F: include/linux/platform_data/nxp-nci.h
F: Documentation/devicetree/bindings/net/nfc/
NFS, SUNRPC, AND LOCKD CLIENTS
M: Trond Myklebust <trond.myklebust@primarydata.com>
M: Trond Myklebust <trond.myklebust@hammerspace.com>
M: Anna Schumaker <anna.schumaker@netapp.com>
L: linux-nfs@vger.kernel.org
W: http://client.linux-nfs.org
@ -12222,7 +12221,7 @@ F: Documentation/s390/vfio-ccw.txt
F: include/uapi/linux/vfio_ccw.h
S390 ZCRYPT DRIVER
M: Harald Freudenberger <freude@de.ibm.com>
M: Harald Freudenberger <freude@linux.ibm.com>
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
@ -13266,6 +13265,12 @@ M: Jan-Benedict Glaw <jbglaw@lug-owl.de>
S: Maintained
F: arch/alpha/kernel/srm_env.c
ST STM32 I2C/SMBUS DRIVER
M: Pierre-Yves MORDRET <pierre-yves.mordret@st.com>
L: linux-i2c@vger.kernel.org
S: Maintained
F: drivers/i2c/busses/i2c-stm32*
STABLE BRANCH
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
L: stable@vger.kernel.org
@ -14650,7 +14655,6 @@ F: drivers/usb/common/usb-otg-fsm.c
USB OVER IP DRIVER
M: Valentina Manea <valentina.manea.m@gmail.com>
M: Shuah Khan <shuahkh@osg.samsung.com>
M: Shuah Khan <shuah@kernel.org>
L: linux-usb@vger.kernel.org
S: Maintained

View File

@ -2,8 +2,8 @@
VERSION = 4
PATCHLEVEL = 17
SUBLEVEL = 0
EXTRAVERSION = -rc3
NAME = Fearless Coyote
EXTRAVERSION = -rc5
NAME = Merciless Moray
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"

View File

@ -464,6 +464,10 @@ config GCC_PLUGIN_LATENT_ENTROPY
config GCC_PLUGIN_STRUCTLEAK
bool "Force initialization of variables containing userspace addresses"
depends on GCC_PLUGINS
# Currently STRUCTLEAK inserts initialization out of live scope of
# variables from KASAN point of view. This leads to KASAN false
# positive reports. Prohibit this combination for now.
depends on !KASAN_EXTRA
help
This plugin zero-initializes any structures containing a
__user attribute. This can prevent some classes of information

View File

@ -303,7 +303,7 @@ wdog: wdog@53fdc000 {
};
can1: can@53fe4000 {
compatible = "fsl,imx35-flexcan";
compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
reg = <0x53fe4000 0x1000>;
clocks = <&clks 33>, <&clks 33>;
clock-names = "ipg", "per";
@ -312,7 +312,7 @@ can1: can@53fe4000 {
};
can2: can@53fe8000 {
compatible = "fsl,imx35-flexcan";
compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
reg = <0x53fe8000 0x1000>;
clocks = <&clks 34>, <&clks 34>;
clock-names = "ipg", "per";

View File

@ -551,7 +551,7 @@ uart2: serial@53fc0000 {
};
can1: can@53fc8000 {
compatible = "fsl,imx53-flexcan";
compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
reg = <0x53fc8000 0x4000>;
interrupts = <82>;
clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
@ -561,7 +561,7 @@ can1: can@53fc8000 {
};
can2: can@53fcc000 {
compatible = "fsl,imx53-flexcan";
compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
reg = <0x53fcc000 0x4000>;
interrupts = <83>;
clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,

View File

@ -309,6 +309,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
return 8;
}
/*
* We are not in the kvm->srcu critical section most of the time, so we take
* the SRCU read lock here. Since we copy the data from the user page, we
* can immediately drop the lock again.
*/
static inline int kvm_read_guest_lock(struct kvm *kvm,
gpa_t gpa, void *data, unsigned long len)
{
int srcu_idx = srcu_read_lock(&kvm->srcu);
int ret = kvm_read_guest(kvm, gpa, data, len);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
static inline void *kvm_get_hyp_vector(void)
{
return kvm_ksym_ref(__kvm_hyp_vector);

View File

@ -75,6 +75,7 @@
#define ARM_CPU_IMP_CAVIUM 0x43
#define ARM_CPU_IMP_BRCM 0x42
#define ARM_CPU_IMP_QCOM 0x51
#define ARM_CPU_IMP_NVIDIA 0x4E
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@ -99,6 +100,9 @@
#define QCOM_CPU_PART_FALKOR 0xC00
#define QCOM_CPU_PART_KRYO 0x200
#define NVIDIA_CPU_PART_DENVER 0x003
#define NVIDIA_CPU_PART_CARMEL 0x004
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@ -114,6 +118,8 @@
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#ifndef __ASSEMBLY__

View File

@ -333,7 +333,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
} else {
u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
sctlr |= (1 << 25);
vcpu_write_sys_reg(vcpu, SCTLR_EL1, sctlr);
vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
}
}

View File

@ -360,6 +360,22 @@ static inline unsigned int kvm_get_vmid_bits(void)
return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
}
/*
* We are not in the kvm->srcu critical section most of the time, so we take
* the SRCU read lock here. Since we copy the data from the user page, we
* can immediately drop the lock again.
*/
static inline int kvm_read_guest_lock(struct kvm *kvm,
gpa_t gpa, void *data, unsigned long len)
{
int srcu_idx = srcu_read_lock(&kvm->srcu);
int ret = kvm_read_guest(kvm, gpa, data, len);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
* EL2 vectors can be mapped and rerouted in a number of ways,

View File

@ -316,6 +316,7 @@ static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
{},
};

View File

@ -18,11 +18,20 @@
#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>
#include <linux/swab.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT);
return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
}
/*
* __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
* guest.
@ -64,14 +73,19 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
addr += fault_ipa - vgic->vgic_cpu_base;
if (kvm_vcpu_dabt_iswrite(vcpu)) {
u32 data = vcpu_data_guest_to_host(vcpu,
vcpu_get_reg(vcpu, rd),
sizeof(u32));
u32 data = vcpu_get_reg(vcpu, rd);
if (__is_be(vcpu)) {
/* guest pre-swabbed data, undo this for writel() */
data = swab32(data);
}
writel_relaxed(data, addr);
} else {
u32 data = readl_relaxed(addr);
vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data,
sizeof(u32)));
if (__is_be(vcpu)) {
/* guest expects swabbed data */
data = swab32(data);
}
vcpu_set_reg(vcpu, rd, data);
}
return 1;

View File

@ -646,8 +646,10 @@ static int keep_initrd __initdata;
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd)
if (!keep_initrd) {
free_reserved_area((void *)start, (void *)end, 0, "initrd");
memblock_free(__virt_to_phys(start), end - start);
}
}
static int __init keepinitrd_setup(char *__unused)

View File

@ -69,17 +69,30 @@ struct dyn_arch_ftrace {
#endif
#if defined(CONFIG_FTRACE_SYSCALLS) && !defined(__ASSEMBLY__)
#ifdef PPC64_ELF_ABI_v1
/*
* Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
* for instance) or ppc32_/ppc64_. We should also match the sys_ variant with
* those.
*/
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
#ifdef PPC64_ELF_ABI_v1
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
/*
* Compare the symbol name with the system call name. Skip the .sys or .SyS
* prefix from the symbol name and the sys prefix from the system call name and
* just match the rest. This is only needed on ppc64 since symbol names on
* 32bit do not start with a period so the generic function will work.
*/
return !strcmp(sym + 4, name + 3);
/* We need to skip past the initial dot, and the __se_sys alias */
return !strcmp(sym + 1, name) ||
(!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) ||
(!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) ||
(!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) ||
(!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4));
}
#else
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
return !strcmp(sym, name) ||
(!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
(!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
(!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
(!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
}
#endif
#endif /* CONFIG_FTRACE_SYSCALLS && !__ASSEMBLY__ */

View File

@ -165,7 +165,6 @@ struct paca_struct {
u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */
u8 irq_soft_mask; /* mask for irq soft masking */
u8 soft_enabled; /* irq soft-enable flag */
u8 irq_happened; /* irq happened while soft-disabled */
u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */

View File

@ -91,6 +91,7 @@ extern int start_topology_update(void);
extern int stop_topology_update(void);
extern int prrn_is_enabled(void);
extern int find_and_online_cpu_nid(int cpu);
extern int timed_topology_update(int nsecs);
#else
static inline int start_topology_update(void)
{
@ -108,16 +109,12 @@ static inline int find_and_online_cpu_nid(int cpu)
{
return 0;
}
static inline int timed_topology_update(int nsecs)
{
return 0;
}
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES)
#if defined(CONFIG_PPC_SPLPAR)
extern int timed_topology_update(int nsecs);
#else
#define timed_topology_update(nsecs)
#endif /* CONFIG_PPC_SPLPAR */
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_NEED_MULTIPLE_NODES */
#include <asm-generic/topology.h>
#ifdef CONFIG_SMP

View File

@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
return count;
}
/*
* This can be called in the panic path with interrupts off, so use
* mdelay in that case.
*/
static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
{
s64 rc = OPAL_BUSY;
@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_write_nvram(__pa(buf), count, off);
if (rc == OPAL_BUSY_EVENT) {
msleep(OPAL_BUSY_DELAY_MS);
if (in_interrupt() || irqs_disabled())
mdelay(OPAL_BUSY_DELAY_MS);
else
msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
} else if (rc == OPAL_BUSY) {
msleep(OPAL_BUSY_DELAY_MS);
if (in_interrupt() || irqs_disabled())
mdelay(OPAL_BUSY_DELAY_MS);
else
msleep(OPAL_BUSY_DELAY_MS);
}
}

View File

@ -261,9 +261,9 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
@ -284,7 +284,7 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NF_TABLES_IPV6=y
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -305,7 +305,7 @@ CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NF_TABLES_BRIDGE=y
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
@ -604,7 +604,6 @@ CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y

View File

@ -259,9 +259,9 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
@ -282,7 +282,7 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NF_TABLES_IPV6=y
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
@ -303,7 +303,7 @@ CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NF_TABLES_BRIDGE=y
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m

View File

@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include <asm/vx-insn.h>
/* Vector register range containing CRC-32 constants */
@ -67,6 +68,8 @@
.previous
GEN_BR_THUNK %r14
.text
/*
* The CRC-32 function(s) use these calling conventions:
@ -203,6 +206,6 @@ ENTRY(crc32_be_vgfm_16)
.Ldone:
VLGVF %r2,%v2,3
br %r14
BR_EX %r14
.previous

View File

@ -14,6 +14,7 @@
*/
#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include <asm/vx-insn.h>
/* Vector register range containing CRC-32 constants */
@ -76,6 +77,7 @@
.previous
GEN_BR_THUNK %r14
.text
@ -264,6 +266,6 @@ crc32_le_vgfm_generic:
.Ldone:
VLGVF %r2,%v2,2
br %r14
BR_EX %r14
.previous

View File

@ -0,0 +1,196 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_S390_NOSPEC_ASM_H
#define _ASM_S390_NOSPEC_ASM_H
#include <asm/alternative-asm.h>
#include <asm/asm-offsets.h>
#include <asm/dwarf.h>
#ifdef __ASSEMBLY__
#ifdef CONFIG_EXPOLINE
_LC_BR_R1 = __LC_BR_R1
/*
* The expoline macros are used to create thunks in the same format
* as gcc generates them. The 'comdat' section flag makes sure that
* the various thunks are merged into a single copy.
*/
.macro __THUNK_PROLOG_NAME name
.pushsection .text.\name,"axG",@progbits,\name,comdat
.globl \name
.hidden \name
.type \name,@function
\name:
CFI_STARTPROC
.endm
.macro __THUNK_EPILOG
CFI_ENDPROC
.popsection
.endm
.macro __THUNK_PROLOG_BR r1,r2
__THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
.endm
.macro __THUNK_PROLOG_BC d0,r1,r2
__THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
.endm
.macro __THUNK_BR r1,r2
jg __s390x_indirect_jump_r\r2\()use_r\r1
.endm
.macro __THUNK_BC d0,r1,r2
jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
.endm
.macro __THUNK_BRASL r1,r2,r3
brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
.endm
.macro __DECODE_RR expand,reg,ruse
.set __decode_fail,1
.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \reg,%r\r1
.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \ruse,%r\r2
\expand \r1,\r2
.set __decode_fail,0
.endif
.endr
.endif
.endr
.if __decode_fail == 1
.error "__DECODE_RR failed"
.endif
.endm
.macro __DECODE_RRR expand,rsave,rtarget,ruse
.set __decode_fail,1
.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \rsave,%r\r1
.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \rtarget,%r\r2
.irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \ruse,%r\r3
\expand \r1,\r2,\r3
.set __decode_fail,0
.endif
.endr
.endif
.endr
.endif
.endr
.if __decode_fail == 1
.error "__DECODE_RRR failed"
.endif
.endm
.macro __DECODE_DRR expand,disp,reg,ruse
.set __decode_fail,1
.irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \reg,%r\r1
.irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.ifc \ruse,%r\r2
\expand \disp,\r1,\r2
.set __decode_fail,0
.endif
.endr
.endif
.endr
.if __decode_fail == 1
.error "__DECODE_DRR failed"
.endif
.endm
.macro __THUNK_EX_BR reg,ruse
# Be very careful when adding instructions to this macro!
# The ALTERNATIVE replacement code has a .+10 which targets
# the "br \reg" after the code has been patched.
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
exrl 0,555f
j .
#else
.ifc \reg,%r1
ALTERNATIVE "ex %r0,_LC_BR_R1", ".insn ril,0xc60000000000,0,.+10", 35
j .
.else
larl \ruse,555f
ex 0,0(\ruse)
j .
.endif
#endif
555: br \reg
.endm
.macro __THUNK_EX_BC disp,reg,ruse
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
exrl 0,556f
j .
#else
larl \ruse,556f
ex 0,0(\ruse)
j .
#endif
556: b \disp(\reg)
.endm
.macro GEN_BR_THUNK reg,ruse=%r1
__DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
__THUNK_EX_BR \reg,\ruse
__THUNK_EPILOG
.endm
.macro GEN_B_THUNK disp,reg,ruse=%r1
__DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
__THUNK_EX_BC \disp,\reg,\ruse
__THUNK_EPILOG
.endm
.macro BR_EX reg,ruse=%r1
557: __DECODE_RR __THUNK_BR,\reg,\ruse
.pushsection .s390_indirect_branches,"a",@progbits
.long 557b-.
.popsection
.endm
.macro B_EX disp,reg,ruse=%r1
558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
.pushsection .s390_indirect_branches,"a",@progbits
.long 558b-.
.popsection
.endm
.macro BASR_EX rsave,rtarget,ruse=%r1
559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
.pushsection .s390_indirect_branches,"a",@progbits
.long 559b-.
.popsection
.endm
#else
.macro GEN_BR_THUNK reg,ruse=%r1
.endm
.macro GEN_B_THUNK disp,reg,ruse=%r1
.endm
.macro BR_EX reg,ruse=%r1
br \reg
.endm
.macro B_EX disp,reg,ruse=%r1
b \disp(\reg)
.endm
.macro BASR_EX rsave,rtarget,ruse=%r1
basr \rsave,\rtarget
.endm
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_NOSPEC_ASM_H */

View File

@ -13,5 +13,11 @@
int verify_sha256_digest(void);
extern u64 kernel_entry;
extern u64 kernel_type;
extern u64 crash_start;
extern u64 crash_size;
#endif /* __ASSEMBLY__ */
#endif /* _S390_PURGATORY_H_ */

View File

@ -65,6 +65,7 @@ obj-y += nospec-branch.o
extra-y += head.o head64.o vmlinux.lds
obj-$(CONFIG_SYSFS) += nospec-sysfs.o
CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
obj-$(CONFIG_MODULES) += module.o

View File

@ -181,6 +181,7 @@ int main(void)
OFFSET(__LC_MACHINE_FLAGS, lowcore, machine_flags);
OFFSET(__LC_PREEMPT_COUNT, lowcore, preempt_count);
OFFSET(__LC_GMAP, lowcore, gmap);
OFFSET(__LC_BR_R1, lowcore, br_r1_trampoline);
/* software defined ABI-relevant lowcore locations 0xe00 - 0xe20 */
OFFSET(__LC_DUMP_REIPL, lowcore, ipib);
/* hardware defined lowcore locations 0x1000 - 0x18ff */

View File

@ -9,18 +9,22 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/nospec-insn.h>
#include <asm/ptrace.h>
#include <asm/sigp.h>
GEN_BR_THUNK %r9
GEN_BR_THUNK %r14
ENTRY(s390_base_mcck_handler)
basr %r13,0
0: lg %r15,__LC_PANIC_STACK # load panic stack
aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_mcck_handler_fn
lg %r1,0(%r1)
ltgr %r1,%r1
lg %r9,0(%r1)
ltgr %r9,%r9
jz 1f
basr %r14,%r1
BASR_EX %r14,%r9
1: la %r1,4095
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
lpswe __LC_MCK_OLD_PSW
@ -37,10 +41,10 @@ ENTRY(s390_base_ext_handler)
basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_ext_handler_fn
lg %r1,0(%r1)
ltgr %r1,%r1
lg %r9,0(%r1)
ltgr %r9,%r9
jz 1f
basr %r14,%r1
BASR_EX %r14,%r9
1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
lpswe __LC_EXT_OLD_PSW
@ -57,10 +61,10 @@ ENTRY(s390_base_pgm_handler)
basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_pgm_handler_fn
lg %r1,0(%r1)
ltgr %r1,%r1
lg %r9,0(%r1)
ltgr %r9,%r9
jz 1f
basr %r14,%r1
BASR_EX %r14,%r9
lmg %r0,%r15,__LC_SAVE_AREA_SYNC
lpswe __LC_PGM_OLD_PSW
1: lpswe disabled_wait_psw-0b(%r13)
@ -117,7 +121,7 @@ ENTRY(diag308_reset)
larl %r4,.Lcontinue_psw # Restore PSW flags
lpswe 0(%r4)
.Lcontinue:
br %r14
BR_EX %r14
.align 16
.Lrestart_psw:
.long 0x00080000,0x80000000 + .Lrestart_part2

View File

@ -28,6 +28,7 @@
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8
@ -183,67 +184,9 @@ _LPP_OFFSET = __LC_LPP
"jnz .+8; .long 0xb2e8d000", 82
.endm
#ifdef CONFIG_EXPOLINE
.macro GEN_BR_THUNK name,reg,tmp
.section .text.\name,"axG",@progbits,\name,comdat
.globl \name
.hidden \name
.type \name,@function
\name:
CFI_STARTPROC
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
exrl 0,0f
#else
larl \tmp,0f
ex 0,0(\tmp)
#endif
j .
0: br \reg
CFI_ENDPROC
.endm
GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
.macro BASR_R14_R9
0: brasl %r14,__s390x_indirect_jump_r1use_r9
.pushsection .s390_indirect_branches,"a",@progbits
.long 0b-.
.popsection
.endm
.macro BR_R1USE_R14
0: jg __s390x_indirect_jump_r1use_r14
.pushsection .s390_indirect_branches,"a",@progbits
.long 0b-.
.popsection
.endm
.macro BR_R11USE_R14
0: jg __s390x_indirect_jump_r11use_r14
.pushsection .s390_indirect_branches,"a",@progbits
.long 0b-.
.popsection
.endm
#else /* CONFIG_EXPOLINE */
.macro BASR_R14_R9
basr %r14,%r9
.endm
.macro BR_R1USE_R14
br %r14
.endm
.macro BR_R11USE_R14
br %r14
.endm
#endif /* CONFIG_EXPOLINE */
GEN_BR_THUNK %r9
GEN_BR_THUNK %r14
GEN_BR_THUNK %r14,%r11
.section .kprobes.text, "ax"
.Ldummy:
@ -260,7 +203,7 @@ _LPP_OFFSET = __LC_LPP
ENTRY(__bpon)
.globl __bpon
BPON
BR_R1USE_R14
BR_EX %r14
/*
* Scheduler resume function, called by switch_to
@ -284,7 +227,7 @@ ENTRY(__switch_to)
mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
BR_R1USE_R14
BR_EX %r14
.L__critical_start:
@ -351,7 +294,7 @@ sie_exit:
xgr %r5,%r5
lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
lg %r2,__SF_SIE_REASON(%r15) # return exit reason code
BR_R1USE_R14
BR_EX %r14
.Lsie_fault:
lghi %r14,-EFAULT
stg %r14,__SF_SIE_REASON(%r15) # set exit reason code
@ -410,7 +353,7 @@ ENTRY(system_call)
lgf %r9,0(%r8,%r10) # get system call add.
TSTMSK __TI_flags(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
BASR_R14_R9 # call sys_xxxx
BASR_EX %r14,%r9 # call sys_xxxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_return:
@ -595,7 +538,7 @@ ENTRY(system_call)
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lg %r2,__PT_ORIG_GPR2(%r11)
BASR_R14_R9 # call sys_xxx
BASR_EX %r14,%r9 # call sys_xxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_tracenogo:
TSTMSK __TI_flags(%r12),_TIF_TRACE
@ -619,7 +562,7 @@ ENTRY(ret_from_fork)
lmg %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
BASR_R14_R9
BASR_EX %r14,%r9
j .Lsysc_tracenogo
/*
@ -701,7 +644,7 @@ ENTRY(pgm_check_handler)
je .Lpgm_return
lgf %r9,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
BASR_R14_R9 # branch to interrupt-handler
BASR_EX %r14,%r9 # branch to interrupt-handler
.Lpgm_return:
LOCKDEP_SYS_EXIT
tm __PT_PSW+1(%r11),0x01 # returning to user ?
@ -1019,7 +962,7 @@ ENTRY(psw_idle)
stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
lpswe __SF_EMPTY(%r15)
BR_R1USE_R14
BR_EX %r14
.Lpsw_idle_end:
/*
@ -1061,7 +1004,7 @@ ENTRY(save_fpu_regs)
.Lsave_fpu_regs_done:
oi __LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
BR_R1USE_R14
BR_EX %r14
.Lsave_fpu_regs_end:
EXPORT_SYMBOL(save_fpu_regs)
@ -1107,7 +1050,7 @@ load_fpu_regs:
.Lload_fpu_regs_done:
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
BR_R1USE_R14
BR_EX %r14
.Lload_fpu_regs_end:
.L__critical_end:
@ -1322,7 +1265,7 @@ cleanup_critical:
jl 0f
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
0: BR_R11USE_R14
0: BR_EX %r14
.align 8
.Lcleanup_table:
@ -1358,7 +1301,7 @@ cleanup_critical:
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
BR_R11USE_R14
BR_EX %r14
#endif
.Lcleanup_system_call:
@ -1412,7 +1355,7 @@ cleanup_critical:
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,.Lsysc_do_svc
BR_R11USE_R14
BR_EX %r14,%r11
.Lcleanup_system_call_insn:
.quad system_call
.quad .Lsysc_stmg
@ -1424,7 +1367,7 @@ cleanup_critical:
.Lcleanup_sysc_tif:
larl %r9,.Lsysc_tif
BR_R11USE_R14
BR_EX %r14,%r11
.Lcleanup_sysc_restore:
# check if stpt has been executed
@ -1441,14 +1384,14 @@ cleanup_critical:
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
BR_R11USE_R14
BR_EX %r14,%r11
.Lcleanup_sysc_restore_insn:
.quad .Lsysc_exit_timer
.quad .Lsysc_done - 4
.Lcleanup_io_tif:
larl %r9,.Lio_tif
BR_R11USE_R14
BR_EX %r14,%r11
.Lcleanup_io_restore:
# check if stpt has been executed
@ -1462,7 +1405,7 @@ cleanup_critical:
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
BR_R11USE_R14
BR_EX %r14,%r11
.Lcleanup_io_restore_insn:
.quad .Lio_exit_timer
.quad .Lio_done - 4
@ -1515,17 +1458,17 @@ cleanup_critical:
# prepare return psw
nihh %r8,0xfcfd # clear irq & wait state bits
lg %r9,48(%r11) # return from psw_idle
BR_R11USE_R14
BR_EX %r14,%r11
.Lcleanup_idle_insn:
.quad .Lpsw_idle_lpsw
.Lcleanup_save_fpu_regs:
larl %r9,save_fpu_regs
BR_R11USE_R14
BR_EX %r14,%r11
.Lcleanup_load_fpu_regs:
larl %r9,load_fpu_regs
BR_R11USE_R14
BR_EX %r14,%r11
/*
* Integer constants

View File

@ -176,10 +176,9 @@ void do_softirq_own_stack(void)
new -= STACK_FRAME_OVERHEAD;
((struct stack_frame *) new)->back_chain = old;
asm volatile(" la 15,0(%0)\n"
" basr 14,%2\n"
" brasl 14,__do_softirq\n"
" la 15,0(%1)\n"
: : "a" (new), "a" (old),
"a" (__do_softirq)
: : "a" (new), "a" (old)
: "0", "1", "2", "3", "4", "5", "14",
"cc", "memory" );
} else {

View File

@ -9,13 +9,17 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/nospec-insn.h>
#include <asm/ptrace.h>
#include <asm/export.h>
GEN_BR_THUNK %r1
GEN_BR_THUNK %r14
.section .kprobes.text, "ax"
ENTRY(ftrace_stub)
br %r14
BR_EX %r14
#define STACK_FRAME_SIZE (STACK_FRAME_OVERHEAD + __PT_SIZE)
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
@ -23,7 +27,7 @@ ENTRY(ftrace_stub)
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
ENTRY(_mcount)
br %r14
BR_EX %r14
EXPORT_SYMBOL(_mcount)
@ -53,7 +57,7 @@ ENTRY(ftrace_caller)
#endif
lgr %r3,%r14
la %r5,STACK_PTREGS(%r15)
basr %r14,%r1
BASR_EX %r14,%r1
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# The j instruction gets runtime patched to a nop instruction.
# See ftrace_enable_ftrace_graph_caller.
@ -68,7 +72,7 @@ ftrace_graph_caller_end:
#endif
lg %r1,(STACK_PTREGS_PSW+8)(%r15)
lmg %r2,%r15,(STACK_PTREGS_GPRS+2*8)(%r15)
br %r1
BR_EX %r1
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@ -81,6 +85,6 @@ ENTRY(return_to_handler)
aghi %r15,STACK_FRAME_OVERHEAD
lgr %r14,%r2
lmg %r2,%r5,32(%r15)
br %r14
BR_EX %r14
#endif

View File

@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <asm/nospec-branch.h>
static int __init nobp_setup_early(char *str)
@ -44,24 +43,6 @@ static int __init nospec_report(void)
}
arch_initcall(nospec_report);
#ifdef CONFIG_SYSFS
ssize_t cpu_show_spectre_v1(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
return sprintf(buf, "Mitigation: limited branch prediction.\n");
return sprintf(buf, "Vulnerable\n");
}
#endif
#ifdef CONFIG_EXPOLINE
int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
@ -112,7 +93,6 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
s32 *epo;
/* Second part of the instruction replace is always a nop */
memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
for (epo = start; epo < end; epo++) {
instr = (u8 *) epo + *epo;
if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
@ -133,18 +113,34 @@ static void __init_or_module __nospec_revert(s32 *start, s32 *end)
br = thunk + (*(int *)(thunk + 2)) * 2;
else
continue;
if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
/* Check for unconditional branch 0x07f? or 0x47f???? */
if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
continue;
memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
switch (type) {
case BRCL_EXPOLINE:
/* brcl to thunk, replace with br + nop */
insnbuf[0] = br[0];
insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
if (br[0] == 0x47) {
/* brcl to b, replace with bc + nopr */
insnbuf[2] = br[2];
insnbuf[3] = br[3];
} else {
/* brcl to br, replace with bcr + nop */
}
break;
case BRASL_EXPOLINE:
/* brasl to thunk, replace with basr + nop */
insnbuf[0] = 0x0d;
insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
if (br[0] == 0x47) {
/* brasl to b, replace with bas + nopr */
insnbuf[0] = 0x4d;
insnbuf[2] = br[2];
insnbuf[3] = br[3];
} else {
/* brasl to br, replace with basr + nop */
insnbuf[0] = 0x0d;
}
break;
}

View File

@ -0,0 +1,21 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/device.h>
#include <linux/cpu.h>
#include <asm/facility.h>
#include <asm/nospec-branch.h>
ssize_t cpu_show_spectre_v1(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
return sprintf(buf, "Mitigation: execute trampolines\n");
if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
return sprintf(buf, "Mitigation: limited branch prediction\n");
return sprintf(buf, "Vulnerable\n");
}

View File

@ -753,6 +753,10 @@ static int __hw_perf_event_init(struct perf_event *event)
*/
rate = 0;
if (attr->freq) {
if (!attr->sample_freq) {
err = -EINVAL;
goto out;
}
rate = freq_to_sample_rate(&si, attr->sample_freq);
rate = hw_limit_rate(&si, rate);
attr->freq = 0;

View File

@ -7,8 +7,11 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/nospec-insn.h>
#include <asm/sigp.h>
GEN_BR_THUNK %r9
#
# Issue "store status" for the current CPU to its prefix page
# and call passed function afterwards
@ -67,9 +70,9 @@ ENTRY(store_status)
st %r4,0(%r1)
st %r5,4(%r1)
stg %r2,8(%r1)
lgr %r1,%r2
lgr %r9,%r2
lgr %r2,%r3
br %r1
BR_EX %r9
.section .bss
.align 8

View File

@ -13,6 +13,7 @@
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/nospec-insn.h>
#include <asm/sigp.h>
/*
@ -24,6 +25,8 @@
* (see below) in the resume process.
* This function runs with disabled interrupts.
*/
GEN_BR_THUNK %r14
.section .text
ENTRY(swsusp_arch_suspend)
stmg %r6,%r15,__SF_GPRS(%r15)
@ -103,7 +106,7 @@ ENTRY(swsusp_arch_suspend)
spx 0x318(%r1)
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
br %r14
BR_EX %r14
/*
* Restore saved memory image to correct place and restore register context.
@ -197,11 +200,10 @@ pgm_check_entry:
larl %r15,init_thread_union
ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
larl %r2,.Lpanic_string
larl %r3,sclp_early_printk
lghi %r1,0
sam31
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
basr %r14,%r3
brasl %r14,sclp_early_printk
larl %r3,.Ldisabled_wait_31
lpsw 0(%r3)
4:
@ -267,7 +269,7 @@ restore_registers:
/* Return 0 */
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
br %r14
BR_EX %r14
.section .data..nosave,"aw",@progbits
.align 8

View File

@ -7,6 +7,9 @@
#include <linux/linkage.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>
GEN_BR_THUNK %r14
/*
* void *memmove(void *dest, const void *src, size_t n)
@ -33,14 +36,14 @@ ENTRY(memmove)
.Lmemmove_forward_remainder:
larl %r5,.Lmemmove_mvc
ex %r4,0(%r5)
br %r14
BR_EX %r14
.Lmemmove_reverse:
ic %r0,0(%r4,%r3)
stc %r0,0(%r4,%r1)
brctg %r4,.Lmemmove_reverse
ic %r0,0(%r4,%r3)
stc %r0,0(%r4,%r1)
br %r14
BR_EX %r14
.Lmemmove_mvc:
mvc 0(1,%r1),0(%r3)
EXPORT_SYMBOL(memmove)
@ -77,7 +80,7 @@ ENTRY(memset)
.Lmemset_clear_remainder:
larl %r3,.Lmemset_xc
ex %r4,0(%r3)
br %r14
BR_EX %r14
.Lmemset_fill:
cghi %r4,1
lgr %r1,%r2
@ -95,10 +98,10 @@ ENTRY(memset)
stc %r3,0(%r1)
larl %r5,.Lmemset_mvc
ex %r4,0(%r5)
br %r14
BR_EX %r14
.Lmemset_fill_exit:
stc %r3,0(%r1)
br %r14
BR_EX %r14
.Lmemset_xc:
xc 0(1,%r1),0(%r1)
.Lmemset_mvc:
@ -121,7 +124,7 @@ ENTRY(memcpy)
.Lmemcpy_remainder:
larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5)
br %r14
BR_EX %r14
.Lmemcpy_loop:
mvc 0(256,%r1),0(%r3)
la %r1,256(%r1)
@ -159,10 +162,10 @@ ENTRY(__memset\bits)
\insn %r3,0(%r1)
larl %r5,.L__memset_mvc\bits
ex %r4,0(%r5)
br %r14
BR_EX %r14
.L__memset_exit\bits:
\insn %r3,0(%r2)
br %r14
BR_EX %r14
.L__memset_mvc\bits:
mvc \bytes(1,%r1),0(%r1)
.endm

View File

@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
#include <asm/nospec-insn.h>
#include "bpf_jit.h"
/*
@ -54,7 +55,7 @@ ENTRY(sk_load_##NAME##_pos); \
clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \
jh sk_load_##NAME##_slow; \
LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
b OFF_OK(%r6); /* Return */ \
B_EX OFF_OK,%r6; /* Return */ \
\
sk_load_##NAME##_slow:; \
lgr %r2,%r7; /* Arg1 = skb pointer */ \
@ -64,11 +65,14 @@ sk_load_##NAME##_slow:; \
brasl %r14,skb_copy_bits; /* Get data from skb */ \
LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \
ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \
br %r6; /* Return */
BR_EX %r6; /* Return */
sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
GEN_BR_THUNK %r6
GEN_B_THUNK OFF_OK,%r6
/*
* Load 1 byte from SKB (optimized version)
*/
@ -80,7 +84,7 @@ ENTRY(sk_load_byte_pos)
clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen?
jnl sk_load_byte_slow
llgc %r14,0(%r3,%r12) # Get byte from skb
b OFF_OK(%r6) # Return OK
B_EX OFF_OK,%r6 # Return OK
sk_load_byte_slow:
lgr %r2,%r7 # Arg1 = skb pointer
@ -90,7 +94,7 @@ sk_load_byte_slow:
brasl %r14,skb_copy_bits # Get data from skb
llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer
ltgr %r2,%r2 # Set cc to (%r2 != 0)
br %r6 # Return cc
BR_EX %r6 # Return cc
#define sk_negative_common(NAME, SIZE, LOAD) \
sk_load_##NAME##_slow_neg:; \
@ -104,7 +108,7 @@ sk_load_##NAME##_slow_neg:; \
jz bpf_error; \
LOAD %r14,0(%r2); /* Get data from pointer */ \
xr %r3,%r3; /* Set cc to zero */ \
br %r6; /* Return cc */
BR_EX %r6; /* Return cc */
sk_negative_common(word, 4, llgf)
sk_negative_common(half, 2, llgh)
@ -113,4 +117,4 @@ sk_negative_common(byte, 1, llgc)
bpf_error:
# force a return 0 from jit handler
ltgr %r15,%r15 # Set condition code
br %r6
BR_EX %r6

View File

@ -25,6 +25,8 @@
#include <linux/bpf.h>
#include <asm/cacheflush.h>
#include <asm/dis.h>
#include <asm/facility.h>
#include <asm/nospec-branch.h>
#include <asm/set_memory.h>
#include "bpf_jit.h"
@ -41,6 +43,8 @@ struct bpf_jit {
int base_ip; /* Base address for literal pool */
int ret0_ip; /* Address of return 0 */
int exit_ip; /* Address of exit */
int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
int tail_call_start; /* Tail call start offset */
int labels[1]; /* Labels for local jumps */
};
@ -250,6 +254,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
REG_SET_SEEN(b2); \
})
#define EMIT6_PCREL_RILB(op, b, target) \
({ \
int rel = (target - jit->prg) / 2; \
_EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \
REG_SET_SEEN(b); \
})
#define EMIT6_PCREL_RIL(op, target) \
({ \
int rel = (target - jit->prg) / 2; \
_EMIT6(op | rel >> 16, rel & 0xffff); \
})
#define _EMIT6_IMM(op, imm) \
({ \
unsigned int __imm = (imm); \
@ -469,8 +486,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
save_restore_regs(jit, REGS_RESTORE, stack_depth);
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
jit->r14_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r14 thunk */
if (test_facility(35)) {
/* exrl %r0,.+10 */
EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
} else {
/* larl %r1,.+14 */
EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
/* ex 0,0(%r1) */
EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
}
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
}
/* br %r14 */
_EMIT2(0x07fe);
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
(jit->seen & SEEN_FUNC)) {
jit->r1_thunk_ip = jit->prg;
/* Generate __s390_indirect_jump_r1 thunk */
if (test_facility(35)) {
/* exrl %r0,.+10 */
EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
/* br %r1 */
_EMIT2(0x07f1);
} else {
/* larl %r1,.+14 */
EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
/* ex 0,S390_lowcore.br_r1_tampoline */
EMIT4_DISP(0x44000000, REG_0, REG_0,
offsetof(struct lowcore, br_r1_trampoline));
/* j . */
EMIT4_PCREL(0xa7f40000, 0);
}
}
}
/*
@ -966,8 +1020,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
/* lg %w1,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
EMIT_CONST_U64(func));
/* basr %r14,%w1 */
EMIT2(0x0d00, REG_14, REG_W1);
if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
/* brasl %r14,__s390_indirect_jump_r1 */
EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
} else {
/* basr %r14,%w1 */
EMIT2(0x0d00, REG_14, REG_W1);
}
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
if ((jit->seen & SEEN_SKB) &&

View File

@ -9,6 +9,7 @@ config SUPERH
select HAVE_IDE if HAS_IOPORT_MAP
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select NO_BOOTMEM
select ARCH_DISCARD_MEMBLOCK
select HAVE_OPROFILE
select HAVE_GENERIC_DMA_COHERENT

View File

@ -43,7 +43,11 @@ void __ref cpu_probe(void)
#endif
#if defined(CONFIG_CPU_J2)
#if defined(CONFIG_SMP)
unsigned cpu = hard_smp_processor_id();
#else
unsigned cpu = 0;
#endif
if (cpu == 0) of_scan_flat_dt(scan_cache, NULL);
if (j2_ccr_base) __raw_writel(0x80000303, j2_ccr_base + 4*cpu);
if (cpu != 0) return;

View File

@ -11,7 +11,6 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/utsname.h>

View File

@ -59,7 +59,9 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
split_page(pfn_to_page(virt_to_phys(ret) >> PAGE_SHIFT), order);
*dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
*dma_handle = virt_to_phys(ret);
if (!WARN_ON(!dev))
*dma_handle -= PFN_PHYS(dev->dma_pfn_offset);
return ret_nocache;
}
@ -69,9 +71,12 @@ void dma_generic_free_coherent(struct device *dev, size_t size,
unsigned long attrs)
{
int order = get_order(size);
unsigned long pfn = (dma_handle >> PAGE_SHIFT) + dev->dma_pfn_offset;
unsigned long pfn = dma_handle >> PAGE_SHIFT;
int k;
if (!WARN_ON(!dev))
pfn += dev->dma_pfn_offset;
for (k = 0; k < (1 << order); k++)
__free_pages(pfn_to_page(pfn + k), 0);
@ -143,7 +148,7 @@ int __init platform_resource_setup_memory(struct platform_device *pdev,
if (!memsize)
return 0;
buf = dma_alloc_coherent(NULL, memsize, &dma_handle, GFP_KERNEL);
buf = dma_alloc_coherent(&pdev->dev, memsize, &dma_handle, GFP_KERNEL);
if (!buf) {
pr_warning("%s: unable to allocate memory\n", name);
return -ENOMEM;

View File

@ -211,59 +211,15 @@ void __init allocate_pgdat(unsigned int nid)
NODE_DATA(nid) = __va(phys);
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
static void __init bootmem_init_one_node(unsigned int nid)
{
unsigned long total_pages, paddr;
unsigned long end_pfn;
struct pglist_data *p;
p = NODE_DATA(nid);
/* Nothing to do.. */
if (!p->node_spanned_pages)
return;
end_pfn = pgdat_end_pfn(p);
total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
if (!paddr)
panic("Can't allocate bootmap for nid[%d]\n", nid);
init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
free_bootmem_with_active_regions(nid, end_pfn);
/*
* XXX Handle initial reservations for the system memory node
* only for the moment, we'll refactor this later for handling
* reservations in other nodes.
*/
if (nid == 0) {
struct memblock_region *reg;
/* Reserve the sections we're already using. */
for_each_memblock(reserved, reg) {
reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
}
}
sparse_memory_present_with_active_regions(nid);
}
static void __init do_init_bootmem(void)
{
struct memblock_region *reg;
int i;
/* Add active regions with valid PFNs. */
for_each_memblock(memory, reg) {
@ -279,9 +235,12 @@ static void __init do_init_bootmem(void)
plat_mem_setup();
for_each_online_node(i)
bootmem_init_one_node(i);
for_each_memblock(memory, reg) {
int nid = memblock_get_region_node(reg);
memory_present(nid, memblock_region_memory_base_pfn(reg),
memblock_region_memory_end_pfn(reg));
}
sparse_init();
}
@ -322,7 +281,6 @@ void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
unsigned long vaddr, end;
int nid;
sh_mv.mv_mem_init();
@ -377,21 +335,7 @@ void __init paging_init(void)
kmap_coherent_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
unsigned long low, start_pfn;
start_pfn = pgdat->bdata->node_min_pfn;
low = pgdat->bdata->node_low_pfn;
if (max_zone_pfns[ZONE_NORMAL] < low)
max_zone_pfns[ZONE_NORMAL] = low;
printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
nid, start_pfn, low);
}
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
}

View File

@ -8,7 +8,6 @@
* for more details.
*/
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/numa.h>
@ -26,9 +25,7 @@ EXPORT_SYMBOL_GPL(node_data);
*/
void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
unsigned long bootmap_pages;
unsigned long start_pfn, end_pfn;
unsigned long bootmem_paddr;
/* Don't allow bogus node assignment */
BUG_ON(nid >= MAX_NUMNODES || nid <= 0);
@ -48,25 +45,9 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
SMP_CACHE_BYTES, end));
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
NODE_DATA(nid)->node_start_pfn = start_pfn;
NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
/* Node-local bootmap */
bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
PAGE_SIZE, end);
init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
start_pfn, end_pfn);
free_bootmem_with_active_regions(nid, end_pfn);
/* Reserve the pgdat and bootmap space with the bootmem allocator */
reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
sizeof(struct pglist_data), BOOTMEM_DEFAULT);
reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
/* It's up */
node_set_online(nid);

View File

@ -1 +0,0 @@
#include "../vdso-fakesections.c"

View File

@ -27,6 +27,7 @@
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
@ -304,17 +305,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
config = attr->config;
cache_type = (config >> 0) & 0xff;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
val = hw_cache_event_ids[cache_type][cache_op][cache_result];
@ -421,6 +425,8 @@ int x86_setup_perfctr(struct perf_event *event)
if (attr->config >= x86_pmu.max_events)
return -EINVAL;
attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
/*
* The generic map:
*/

View File

@ -92,6 +92,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"
@ -302,6 +303,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
} else if (event->pmu == &cstate_pkg_pmu) {
if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
return -EINVAL;
cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
if (!pkg_msr[cfg].attr)
return -EINVAL;
event->hw.event_base = pkg_msr[cfg].msr;

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/intel-family.h>
enum perf_msr_id {
@ -158,9 +159,6 @@ static int msr_event_init(struct perf_event *event)
if (event->attr.type != event->pmu->type)
return -ENOENT;
if (cfg >= PERF_MSR_EVENT_MAX)
return -EINVAL;
/* unsupported modes and filters */
if (event->attr.exclude_user ||
event->attr.exclude_kernel ||
@ -171,6 +169,11 @@ static int msr_event_init(struct perf_event *event)
event->attr.sample_period) /* no sampling */
return -EINVAL;
if (cfg >= PERF_MSR_EVENT_MAX)
return -EINVAL;
cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
if (!msr[cfg].attr)
return -EINVAL;

View File

@ -29,7 +29,7 @@
#define KVM_FEATURE_PV_TLB_FLUSH 9
#define KVM_FEATURE_ASYNC_PF_VMEXIT 10
#define KVM_HINTS_DEDICATED 0
#define KVM_HINTS_REALTIME 0
/* The last 8 bits are used to indicate how to interpret the flags field
* in pvclock structure. If no bits are set, all flags are ignored.

View File

@ -14,8 +14,11 @@
#include <asm/amd_nb.h>
#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
#define PCI_DEVICE_ID_AMD_17H_DF_F4 0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
@ -24,6 +27,7 @@ static u32 *flush_words;
static const struct pci_device_id amd_root_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
{}
};
@ -39,6 +43,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{}
};
@ -51,6 +56,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};

View File

@ -848,6 +848,11 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
c->x86_power = edx;
}
if (c->extended_cpuid_level >= 0x80000008) {
cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
c->x86_capability[CPUID_8000_0008_EBX] = ebx;
}
if (c->extended_cpuid_level >= 0x8000000a)
c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
@ -871,7 +876,6 @@ static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
c->x86_virt_bits = (eax >> 8) & 0xff;
c->x86_phys_bits = eax & 0xff;
c->x86_capability[CPUID_8000_0008_EBX] = ebx;
}
#ifdef CONFIG_X86_32
else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))

View File

@ -94,6 +94,11 @@ static struct smca_bank_name smca_names[] = {
[SMCA_SMU] = { "smu", "System Management Unit" },
};
static u32 smca_bank_addrs[MAX_NR_BANKS][NR_BLOCKS] __ro_after_init =
{
[0 ... MAX_NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = -1 }
};
const char *smca_get_name(enum smca_bank_types t)
{
if (t >= N_SMCA_BANK_TYPES)
@ -443,20 +448,26 @@ static u32 smca_get_block_address(unsigned int cpu, unsigned int bank,
if (!block)
return MSR_AMD64_SMCA_MCx_MISC(bank);
/* Check our cache first: */
if (smca_bank_addrs[bank][block] != -1)
return smca_bank_addrs[bank][block];
/*
* For SMCA enabled processors, BLKPTR field of the first MISC register
* (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4).
*/
if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
return addr;
goto out;
if (!(low & MCI_CONFIG_MCAX))
return addr;
goto out;
if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
(low & MASK_BLKPTR_LO))
return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
out:
smca_bank_addrs[bank][block] = addr;
return addr;
}
@ -468,18 +479,6 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi
if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
return addr;
/* Get address from already initialized block. */
if (per_cpu(threshold_banks, cpu)) {
struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank];
if (bankp && bankp->blocks) {
struct threshold_block *blockp = &bankp->blocks[block];
if (blockp)
return blockp->address;
}
}
if (mce_flags.smca)
return smca_get_block_address(cpu, bank, block);

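The mce/amd.c hunks above drop the walk of the per-CPU threshold_banks structures and instead cache each resolved block address in smca_bank_addrs[], seeded with a -1 sentinel and filled the first time smca_get_block_address() computes an address. A minimal stand-alone sketch of that cache-on-first-use pattern follows; the names and the compute_address() helper are illustrative stand-ins, not kernel code (the range designators are the same GNU extension the diff uses).

```c
#include <inttypes.h>
#include <stdio.h>

#define NR_BANKS  4
#define NR_BLOCKS 5

/* -1 (all bits set) marks an entry whose address has not been resolved yet. */
static uint32_t addr_cache[NR_BANKS][NR_BLOCKS] = {
	[0 ... NR_BANKS - 1] = { [0 ... NR_BLOCKS - 1] = (uint32_t)-1 }
};

/* Hypothetical stand-in for the MSR probing the driver performs on a miss. */
static uint32_t compute_address(unsigned int bank, unsigned int block)
{
	return 0x1000u + bank * 0x10u + block;
}

static uint32_t get_address(unsigned int bank, unsigned int block)
{
	/* Check the cache first, exactly like the smca_bank_addrs lookup. */
	if (addr_cache[bank][block] != (uint32_t)-1)
		return addr_cache[bank][block];

	addr_cache[bank][block] = compute_address(bank, block);
	return addr_cache[bank][block];
}

int main(void)
{
	printf("0x%" PRIx32 "\n", get_address(1, 2));	/* computed and cached */
	printf("0x%" PRIx32 "\n", get_address(1, 2));	/* served from the cache */
	return 0;
}
```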
View File

@ -457,7 +457,7 @@ static void __init sev_map_percpu_data(void)
static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
native_smp_prepare_cpus(max_cpus);
if (kvm_para_has_hint(KVM_HINTS_DEDICATED))
if (kvm_para_has_hint(KVM_HINTS_REALTIME))
static_branch_disable(&virt_spin_lock_key);
}
@ -553,7 +553,7 @@ static void __init kvm_guest_init(void)
}
if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
!kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;
@ -649,7 +649,7 @@ static __init int kvm_setup_pv_tlb_flush(void)
int cpu;
if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
!kvm_para_has_hint(KVM_HINTS_DEDICATED) &&
!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
for_each_possible_cpu(cpu) {
zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
@ -745,7 +745,7 @@ void __init kvm_spinlock_init(void)
if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
return;
if (kvm_para_has_hint(KVM_HINTS_DEDICATED))
if (kvm_para_has_hint(KVM_HINTS_REALTIME))
return;
__pv_init_lock_hash();

View File

@ -1067,6 +1067,7 @@ static struct clocksource clocksource_tsc_early = {
.resume = tsc_resume,
.mark_unstable = tsc_cs_mark_unstable,
.tick_stable = tsc_cs_tick_stable,
.list = LIST_HEAD_INIT(clocksource_tsc_early.list),
};
/*
@ -1086,6 +1087,7 @@ static struct clocksource clocksource_tsc = {
.resume = tsc_resume,
.mark_unstable = tsc_cs_mark_unstable,
.tick_stable = tsc_cs_tick_stable,
.list = LIST_HEAD_INIT(clocksource_tsc.list),
};
void mark_tsc_unstable(char *reason)
@ -1098,13 +1100,9 @@ void mark_tsc_unstable(char *reason)
clear_sched_clock_stable();
disable_sched_clock_irqtime();
pr_info("Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */
if (clocksource_tsc.mult) {
clocksource_mark_unstable(&clocksource_tsc);
} else {
clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
clocksource_tsc.rating = 0;
}
clocksource_mark_unstable(&clocksource_tsc_early);
clocksource_mark_unstable(&clocksource_tsc);
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
@ -1244,7 +1242,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
/* Don't bother refining TSC on unstable systems */
if (tsc_unstable)
return;
goto unreg;
/*
* Since the work is started early in boot, we may be
@ -1297,11 +1295,12 @@ static void tsc_refine_calibration_work(struct work_struct *work)
out:
if (tsc_unstable)
return;
goto unreg;
if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
clocksource_unregister(&clocksource_tsc_early);
}
@ -1311,8 +1310,8 @@ static int __init init_tsc_clocksource(void)
if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
return 0;
if (check_tsc_unstable())
return 0;
if (tsc_unstable)
goto unreg;
if (tsc_clocksource_reliable)
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
@ -1328,6 +1327,7 @@ static int __init init_tsc_clocksource(void)
if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz);
unreg:
clocksource_unregister(&clocksource_tsc_early);
return 0;
}

View File

@ -1265,7 +1265,7 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
struct kvm_run *run = vcpu->run;
kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
return 1;
return kvm_skip_emulated_instruction(vcpu);
}
static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
@ -1296,8 +1296,10 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
if (param & ~KVM_HYPERV_CONN_ID_MASK)
return HV_STATUS_INVALID_HYPERCALL_INPUT;
/* conn_to_evt is protected by vcpu->kvm->srcu */
/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
rcu_read_lock();
eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param);
rcu_read_unlock();
if (!eventfd)
return HV_STATUS_INVALID_PORT_ID;

View File

@ -1463,23 +1463,6 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
local_irq_restore(flags);
}
static void start_sw_period(struct kvm_lapic *apic)
{
if (!apic->lapic_timer.period)
return;
if (apic_lvtt_oneshot(apic) &&
ktime_after(ktime_get(),
apic->lapic_timer.target_expiration)) {
apic_timer_expired(apic);
return;
}
hrtimer_start(&apic->lapic_timer.timer,
apic->lapic_timer.target_expiration,
HRTIMER_MODE_ABS_PINNED);
}
static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
{
ktime_t now, remaining;
@ -1546,6 +1529,26 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
apic->lapic_timer.period);
}
static void start_sw_period(struct kvm_lapic *apic)
{
if (!apic->lapic_timer.period)
return;
if (ktime_after(ktime_get(),
apic->lapic_timer.target_expiration)) {
apic_timer_expired(apic);
if (apic_lvtt_oneshot(apic))
return;
advance_periodic_target_expiration(apic);
}
hrtimer_start(&apic->lapic_timer.timer,
apic->lapic_timer.target_expiration,
HRTIMER_MODE_ABS_PINNED);
}
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
if (!lapic_in_kernel(vcpu))

View File

@ -1494,6 +1494,12 @@ static inline bool cpu_has_vmx_vmfunc(void)
SECONDARY_EXEC_ENABLE_VMFUNC;
}
static bool vmx_umip_emulated(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_DESC;
}
static inline bool report_flexpriority(void)
{
return flexpriority_enabled;
@ -4761,14 +4767,16 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
else
hw_cr4 |= KVM_PMODE_VM_CR4_ALWAYS_ON;
if ((cr4 & X86_CR4_UMIP) && !boot_cpu_has(X86_FEATURE_UMIP)) {
vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
SECONDARY_EXEC_DESC);
hw_cr4 &= ~X86_CR4_UMIP;
} else if (!is_guest_mode(vcpu) ||
!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated()) {
if (cr4 & X86_CR4_UMIP) {
vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
SECONDARY_EXEC_DESC);
hw_cr4 &= ~X86_CR4_UMIP;
} else if (!is_guest_mode(vcpu) ||
!nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC))
vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
SECONDARY_EXEC_DESC);
}
if (cr4 & X86_CR4_VMXE) {
/*
@ -9497,12 +9505,6 @@ static bool vmx_xsaves_supported(void)
SECONDARY_EXEC_XSAVES;
}
static bool vmx_umip_emulated(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_DESC;
}
static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
{
u32 exit_intr_info;

View File

@ -114,7 +114,7 @@ module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
static bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
unsigned int min_timer_period_us = 500;
unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
static bool __read_mostly kvmclock_periodic_sync = true;
@ -843,7 +843,10 @@ EXPORT_SYMBOL_GPL(kvm_set_cr4);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
#ifdef CONFIG_X86_64
cr3 &= ~CR3_PCID_INVD;
bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
if (pcid_enabled)
cr3 &= ~CR3_PCID_INVD;
#endif
if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
@ -6671,12 +6674,13 @@ void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
int op_64_bit, r;
int op_64_bit;
r = kvm_skip_emulated_instruction(vcpu);
if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);
if (kvm_hv_hypercall_enabled(vcpu->kvm)) {
if (!kvm_hv_hypercall(vcpu))
return 0;
goto out;
}
nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
@ -6697,7 +6701,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
if (kvm_x86_ops->get_cpl(vcpu) != 0) {
ret = -KVM_EPERM;
goto out;
goto out_error;
}
switch (nr) {
@ -6717,12 +6721,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
ret = -KVM_ENOSYS;
break;
}
out:
out_error:
if (!op_64_bit)
ret = (u32)ret;
kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
out:
++vcpu->stat.hypercalls;
return r;
return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);

View File

@ -65,6 +65,19 @@ static void __init xen_hvm_init_mem_mapping(void)
{
early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));
/*
* The virtual address of the shared_info page has changed, so
* the vcpu_info pointer for VCPU 0 is now stale.
*
* The prepare_boot_cpu callback will re-initialize it via
* xen_vcpu_setup, but we can't rely on that to be called for
* old Xen versions (xen_have_vector_callback == 0).
*
* It is, in any case, bad to have a stale vcpu_info pointer
* so reset it now.
*/
xen_vcpu_info_reset(0);
}
static void __init init_hvm_pv_info(void)

View File

@ -42,13 +42,11 @@ xmaddr_t arbitrary_virt_to_machine(void *vaddr)
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
static void xen_flush_tlb_all(void)
static noinline void xen_flush_tlb_all(void)
{
struct mmuext_op *op;
struct multicall_space mcs;
trace_xen_mmu_flush_tlb_all(0);
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));

View File

@ -1310,13 +1310,11 @@ unsigned long xen_read_cr2_direct(void)
return this_cpu_read(xen_vcpu_info.arch.cr2);
}
static void xen_flush_tlb(void)
static noinline void xen_flush_tlb(void)
{
struct mmuext_op *op;
struct multicall_space mcs;
trace_xen_mmu_flush_tlb(0);
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));

View File

@ -56,6 +56,10 @@ acpi_status acpi_ns_initialize_objects(void);
acpi_status acpi_ns_initialize_devices(u32 flags);
acpi_status
acpi_ns_init_one_package(acpi_handle obj_handle,
u32 level, void *context, void **return_value);
/*
* nsload - Namespace loading
*/

View File

@ -174,6 +174,13 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
/* Complete the initialization/resolution of package objects */
status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, 0,
acpi_ns_init_one_package, NULL, NULL,
NULL);
/* Parameter Data (optional) */
if (parameter_node) {
@ -430,6 +437,13 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(status);
}
/* Complete the initialization/resolution of package objects */
status = acpi_ns_walk_namespace(ACPI_TYPE_PACKAGE, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX, 0,
acpi_ns_init_one_package, NULL, NULL,
NULL);
/* Store the ddb_handle into the Target operand */
status = acpi_ex_store(ddb_handle, target, walk_state);

View File

@ -240,6 +240,58 @@ acpi_status acpi_ns_initialize_devices(u32 flags)
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_init_one_package
*
* PARAMETERS: obj_handle - Node
* level - Current nesting level
* context - Not used
* return_value - Not used
*
* RETURN: Status
*
* DESCRIPTION: Callback from acpi_walk_namespace. Invoked for every package
* within the namespace. Used during dynamic load of an SSDT.
*
******************************************************************************/
acpi_status
acpi_ns_init_one_package(acpi_handle obj_handle,
u32 level, void *context, void **return_value)
{
acpi_status status;
union acpi_operand_object *obj_desc;
struct acpi_namespace_node *node =
(struct acpi_namespace_node *)obj_handle;
obj_desc = acpi_ns_get_attached_object(node);
if (!obj_desc) {
return (AE_OK);
}
/* Exit if package is already initialized */
if (obj_desc->package.flags & AOPOBJ_DATA_VALID) {
return (AE_OK);
}
status = acpi_ds_get_package_arguments(obj_desc);
if (ACPI_FAILURE(status)) {
return (AE_OK);
}
status =
acpi_ut_walk_package_tree(obj_desc, NULL,
acpi_ds_init_package_element, NULL);
if (ACPI_FAILURE(status)) {
return (AE_OK);
}
obj_desc->package.flags |= AOPOBJ_DATA_VALID;
return (AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ns_init_one_object
@ -360,27 +412,11 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
case ACPI_TYPE_PACKAGE:
/* Complete the initialization/resolution of the package object */
info->package_init++;
status = acpi_ds_get_package_arguments(obj_desc);
if (ACPI_FAILURE(status)) {
break;
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_PARSE,
"%s: Completing resolution of Package elements\n",
ACPI_GET_FUNCTION_NAME));
/*
* Resolve all named references in package objects (and all
* sub-packages). This action has been deferred until the entire
* namespace has been loaded, in order to support external and
* forward references from individual package elements (05/2017).
*/
status = acpi_ut_walk_package_tree(obj_desc, NULL,
acpi_ds_init_package_element,
NULL);
obj_desc->package.flags |= AOPOBJ_DATA_VALID;
status =
acpi_ns_init_one_package(obj_handle, level, NULL, NULL);
break;
default:

View File

@ -698,7 +698,7 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
DPRINTK("ENTER\n");
ahci_stop_engine(ap);
hpriv->stop_engine(ap);
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
deadline, &online, NULL);
@ -724,7 +724,7 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
bool online;
int rc;
ahci_stop_engine(ap);
hpriv->stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
@ -788,7 +788,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
DPRINTK("ENTER\n");
ahci_stop_engine(ap);
hpriv->stop_engine(ap);
for (i = 0; i < 2; i++) {
u16 val;

View File

@ -350,7 +350,6 @@ struct ahci_host_priv {
u32 em_msg_type; /* EM message type */
bool got_runtime_pm; /* Did we do pm_runtime_get? */
struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
struct reset_control *rsts; /* Optional */
struct regulator **target_pwrs; /* Optional */
/*
* If platform uses PHYs. There is a 1:1 relation between the port number and
@ -366,6 +365,13 @@ struct ahci_host_priv {
* be overridden anytime before the host is activated.
*/
void (*start_engine)(struct ata_port *ap);
/*
* Optional ahci_stop_engine override, if not set this gets set to the
* default ahci_stop_engine during ahci_save_initial_config, this can
* be overridden anytime before the host is activated.
*/
int (*stop_engine)(struct ata_port *ap);
irqreturn_t (*irq_handler)(int irq, void *dev_instance);
/* only required for per-port MSI(-X) support */

View File

@ -62,6 +62,60 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
}
/**
* ahci_mvebu_stop_engine
*
* @ap: Target ata port
*
* Errata Ref#226 - SATA Disk HOT swap issue when connected through
* Port Multiplier in FIS-based Switching mode.
*
* To avoid the issue, according to design, the bits[11:8, 0] of
* register PxFBS are cleared when Port Command and Status (0x18) bit[0]
* changes its value from 1 to 0, i.e. falling edge of Port
* Command and Status bit[0] sends PULSE that resets PxFBS
* bits[11:8; 0].
*
* This function is used to override function of "ahci_stop_engine"
* from libahci.c by adding the mvebu work around(WA) to save PxFBS
* value before the PxCMD ST write of 0, then restore PxFBS value.
*
* Return: 0 on success; Error code otherwise.
*/
int ahci_mvebu_stop_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp, port_fbs;
tmp = readl(port_mmio + PORT_CMD);
/* check if the HBA is idle */
if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
return 0;
/* save the port PxFBS register for later restore */
port_fbs = readl(port_mmio + PORT_FBS);
/* setting HBA to idle */
tmp &= ~PORT_CMD_START;
writel(tmp, port_mmio + PORT_CMD);
/*
* bit #15 PxCMD signal doesn't clear PxFBS,
* restore the PxFBS register right after clearing the PxCMD ST,
* no need to wait for the PxCMD bit #15.
*/
writel(port_fbs, port_mmio + PORT_FBS);
/* wait for engine to stop. This could be as long as 500 msec */
tmp = ata_wait_register(ap, port_mmio + PORT_CMD,
PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
if (tmp & PORT_CMD_LIST_ON)
return -EIO;
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
{
@ -112,6 +166,8 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
if (rc)
return rc;
hpriv->stop_engine = ahci_mvebu_stop_engine;
if (of_device_is_compatible(pdev->dev.of_node,
"marvell,armada-380-ahci")) {
dram = mv_mbus_dram_info();

View File

@ -96,7 +96,7 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
DPRINTK("ENTER\n");
ahci_stop_engine(ap);
hpriv->stop_engine(ap);
/*
* There is a errata on ls1021a Rev1.0 and Rev2.0 which is:

View File

@ -165,7 +165,7 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
PORT_CMD_ISSUE, 0x0, 1, 100))
return -EBUSY;
ahci_stop_engine(ap);
hpriv->stop_engine(ap);
ahci_start_fis_rx(ap);
/*
@ -421,7 +421,7 @@ static int xgene_ahci_hardreset(struct ata_link *link, unsigned int *class,
portrxfis_saved = readl(port_mmio + PORT_FIS_ADDR);
portrxfishi_saved = readl(port_mmio + PORT_FIS_ADDR_HI);
ahci_stop_engine(ap);
hpriv->stop_engine(ap);
rc = xgene_ahci_do_hardreset(link, deadline, &online);

View File

@ -560,6 +560,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
if (!hpriv->start_engine)
hpriv->start_engine = ahci_start_engine;
if (!hpriv->stop_engine)
hpriv->stop_engine = ahci_stop_engine;
if (!hpriv->irq_handler)
hpriv->irq_handler = ahci_single_level_irq_intr;
}
@ -897,9 +900,10 @@ static void ahci_start_port(struct ata_port *ap)
static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
int rc;
struct ahci_host_priv *hpriv = ap->host->private_data;
/* disable DMA */
rc = ahci_stop_engine(ap);
rc = hpriv->stop_engine(ap);
if (rc) {
*emsg = "failed to stop engine";
return rc;
@ -1310,7 +1314,7 @@ int ahci_kick_engine(struct ata_port *ap)
int busy, rc;
/* stop engine */
rc = ahci_stop_engine(ap);
rc = hpriv->stop_engine(ap);
if (rc)
goto out_restart;
@ -1549,7 +1553,7 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
DPRINTK("ENTER\n");
ahci_stop_engine(ap);
hpriv->stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);
@ -2075,14 +2079,14 @@ void ahci_error_handler(struct ata_port *ap)
if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
/* restart engine */
ahci_stop_engine(ap);
hpriv->stop_engine(ap);
hpriv->start_engine(ap);
}
sata_pmp_error_handler(ap);
if (!ata_dev_enabled(ap->link.device))
ahci_stop_engine(ap);
hpriv->stop_engine(ap);
}
EXPORT_SYMBOL_GPL(ahci_error_handler);
@ -2129,7 +2133,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
return;
/* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
rc = ahci_stop_engine(ap);
rc = hpriv->stop_engine(ap);
if (rc)
return;
@ -2189,7 +2193,7 @@ static void ahci_enable_fbs(struct ata_port *ap)
return;
}
rc = ahci_stop_engine(ap);
rc = hpriv->stop_engine(ap);
if (rc)
return;
@ -2222,7 +2226,7 @@ static void ahci_disable_fbs(struct ata_port *ap)
return;
}
rc = ahci_stop_engine(ap);
rc = hpriv->stop_engine(ap);
if (rc)
return;

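The libahci changes above turn engine stop into an overridable hook: ahci_save_initial_config() installs ahci_stop_engine as the default, and every call site now goes through hpriv->stop_engine so a platform driver (ahci_mvebu, for instance) can substitute its errata workaround. A small stand-alone sketch of that default-plus-override callback pattern, with hypothetical names:

```c
#include <stdio.h>

/*
 * Toy host-private structure with one overridable hook, mirroring the
 * start_engine/stop_engine arrangement: a platform driver may install its
 * own callback before the host is activated; otherwise the library default
 * is filled in.
 */
struct host_priv {
	int (*stop_engine)(int port);
};

static int default_stop_engine(int port)
{
	printf("default stop on port %d\n", port);
	return 0;
}

static int quirky_stop_engine(int port)
{
	printf("workaround stop on port %d\n", port);
	return 0;
}

/* Library side: fill in a default for any hook the platform left unset. */
static void save_initial_config(struct host_priv *hpriv)
{
	if (!hpriv->stop_engine)
		hpriv->stop_engine = default_stop_engine;
}

int main(void)
{
	struct host_priv plain  = { .stop_engine = NULL };
	struct host_priv quirky = { .stop_engine = quirky_stop_engine };

	save_initial_config(&plain);
	save_initial_config(&quirky);

	plain.stop_engine(0);	/* library default */
	quirky.stop_engine(0);	/* platform override preserved */
	return 0;
}
```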
View File

@ -25,7 +25,6 @@
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/of_platform.h>
#include <linux/reset.h>
#include "ahci.h"
static void ahci_host_stop(struct ata_host *host);
@ -196,8 +195,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
* following order:
* 1) Regulator
* 2) Clocks (through ahci_platform_enable_clks)
* 3) Resets
* 4) Phys
* 3) Phys
*
* If resource enabling fails at any point the previous enabled resources
* are disabled in reverse order.
@ -217,19 +215,12 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
if (rc)
goto disable_regulator;
rc = reset_control_deassert(hpriv->rsts);
rc = ahci_platform_enable_phys(hpriv);
if (rc)
goto disable_clks;
rc = ahci_platform_enable_phys(hpriv);
if (rc)
goto disable_resets;
return 0;
disable_resets:
reset_control_assert(hpriv->rsts);
disable_clks:
ahci_platform_disable_clks(hpriv);
@ -248,15 +239,12 @@ EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
* following order:
* 1) Phys
* 2) Clocks (through ahci_platform_disable_clks)
* 3) Resets
* 4) Regulator
* 3) Regulator
*/
void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
{
ahci_platform_disable_phys(hpriv);
reset_control_assert(hpriv->rsts);
ahci_platform_disable_clks(hpriv);
ahci_platform_disable_regulators(hpriv);
@ -405,12 +393,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
hpriv->clks[i] = clk;
}
hpriv->rsts = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(hpriv->rsts)) {
rc = PTR_ERR(hpriv->rsts);
goto err_out;
}
hpriv->nports = child_nodes = of_get_child_count(dev->of_node);
/*

View File

@ -4549,6 +4549,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM, },
/* This specific Samsung model/firmware-rev does not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
/* Sandisk devices which are known to not handle LPM well */
{ "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },

View File

@ -175,8 +175,8 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
va_list args)
static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
const char *fmt, va_list args)
{
ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
ATA_EH_DESC_LEN - ehi->desc_len,

View File

@ -410,7 +410,7 @@ static int ahci_highbank_hardreset(struct ata_link *link, unsigned int *class,
int rc;
int retry = 100;
ahci_stop_engine(ap);
hpriv->stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(link->device, &tf);

View File

@ -285,13 +285,13 @@ static const struct sil24_cerr_info {
[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
"protocol mismatch" },
[PORT_CERR_DIRECTION] = { AC_ERR_HSM, ATA_EH_RESET,
"data directon mismatch" },
"data direction mismatch" },
[PORT_CERR_UNDERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
"ran out of SGEs while writing" },
[PORT_CERR_OVERRUN] = { AC_ERR_HSM, ATA_EH_RESET,
"ran out of SGEs while reading" },
[PORT_CERR_PKT_PROT] = { AC_ERR_HSM, ATA_EH_RESET,
"invalid data directon for ATAPI CDB" },
"invalid data direction for ATAPI CDB" },
[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
"SGT not on qword boundary" },
[PORT_CERR_SGT_TGTABRT] = { AC_ERR_HOST_BUS, ATA_EH_RESET,

View File

@ -191,7 +191,7 @@ static char *res_strings[] = {
"reserved 37",
"reserved 38",
"reserved 39",
"reseverd 40",
"reserved 40",
"reserved 41",
"reserved 42",
"reserved 43",

View File

@ -28,6 +28,7 @@
#include <asm/io.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include "uPD98401.h"
#include "uPD98402.h"
@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
return -EFAULT;
if (pool < 0 || pool > ZATM_LAST_POOL)
return -EINVAL;
pool = array_index_nospec(pool,
ZATM_LAST_POOL + 1);
spin_lock_irqsave(&zatm_dev->lock, flags);
info = zatm_dev->pool_info[pool];
if (cmd == ZATM_GETPOOLZ) {

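The zatm hunk applies the usual Spectre-v1 hardening pattern: keep the architectural bounds check, then clamp the user-controlled index with array_index_nospec() before it selects the array element, so a mispredicted branch cannot feed an out-of-range value into the load. The self-contained sketch below shows the same idea; index_clamp() is a hypothetical stand-in for the kernel helper, not its implementation.

```c
#include <stddef.h>
#include <stdio.h>

#define POOL_COUNT 8

static int pool_info[POOL_COUNT] = { [3] = 42 };

/*
 * Toy stand-in for array_index_nospec(): clamp the index to [0, size) with
 * a branch-free mask so that, even under misspeculation of the caller's
 * bounds check, the value used for the array access stays in range.
 */
static size_t index_clamp(size_t index, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(index < size);	/* all-ones if in range, else 0 */
	return index & mask;
}

static int read_pool(long pool)
{
	if (pool < 0 || pool >= POOL_COUNT)	/* architectural bounds check */
		return -1;

	/* Clamp before the dependent load, as the zatm ioctl now does. */
	return pool_info[index_clamp((size_t)pool, POOL_COUNT)];
}

int main(void)
{
	printf("%d\n", read_pool(3));	/* 42 */
	printf("%d\n", read_pool(99));	/* -1, rejected */
	return 0;
}
```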
View File

@ -2366,7 +2366,9 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
osd_req_op_cls_init(obj_req->osd_req, 0, CEPH_OSD_OP_CALL, "rbd",
"copyup");
osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
obj_req->copyup_bvecs, bytes);
obj_req->copyup_bvecs,
obj_req->copyup_bvec_count,
bytes);
switch (obj_req->img_request->op_type) {
case OBJ_OP_WRITE:

View File

@ -231,6 +231,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@ -263,7 +264,6 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* QCA ROME chipset */
{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
@ -399,6 +399,13 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
},
},
{
/* Dell XPS 9360 (QCA ROME device 0cf3:e300) */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
},
},
{}
};
@ -2852,6 +2859,12 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
}
#endif
static void btusb_check_needs_reset_resume(struct usb_interface *intf)
{
if (dmi_check_system(btusb_needs_reset_resume_table))
interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@ -2974,9 +2987,6 @@ static int btusb_probe(struct usb_interface *intf,
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
if (dmi_check_system(btusb_needs_reset_resume_table))
interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
if (err)
@ -3064,6 +3074,7 @@ static int btusb_probe(struct usb_interface *intf,
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
btusb_check_needs_reset_resume(intf);
}
#ifdef CONFIG_BT_HCIBTUSB_RTL

View File

@ -195,7 +195,7 @@ static int uninorth_insert_memory(struct agp_memory *mem, off_t pg_start, int ty
return 0;
}
int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
static int uninorth_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
size_t i;
u32 *gp;
@ -470,7 +470,7 @@ static int uninorth_free_gatt_table(struct agp_bridge_data *bridge)
return 0;
}
void null_cache_flush(void)
static void null_cache_flush(void)
{
mb();
}

View File

@ -266,15 +266,13 @@ config COMMON_CLK_STM32MP157
Support for stm32mp157 SoC family clocks
config COMMON_CLK_STM32F
bool "Clock driver for stm32f4 and stm32f7 SoC families"
depends on MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746
def_bool COMMON_CLK && (MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746)
help
---help---
Support for stm32f4 and stm32f7 SoC families clocks
config COMMON_CLK_STM32H7
bool "Clock driver for stm32h7 SoC family"
depends on MACH_STM32H743
def_bool COMMON_CLK && MACH_STM32H743
help
---help---
Support for stm32h7 SoC family clocks

View File

@ -464,7 +464,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_rate(clks[IMX6UL_CLK_AHB], 99000000);
/* Change periph_pre clock to pll2_bus to adjust AXI rate to 264MHz */
clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_PLL3_USB_OTG]);
clk_set_parent(clks[IMX6UL_CLK_PERIPH_CLK2_SEL], clks[IMX6UL_CLK_OSC]);
clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_CLK2]);
clk_set_parent(clks[IMX6UL_CLK_PERIPH_PRE], clks[IMX6UL_CLK_PLL2_BUS]);
clk_set_parent(clks[IMX6UL_CLK_PERIPH], clks[IMX6UL_CLK_PERIPH_PRE]);

View File

@ -20,7 +20,7 @@ config ACPI_CPPC_CPUFREQ
config ARM_ARMADA_37XX_CPUFREQ
tristate "Armada 37xx CPUFreq support"
depends on ARCH_MVEBU
depends on ARCH_MVEBU && CPUFREQ_DT
help
This adds the CPUFreq driver support for Marvell Armada 37xx SoCs.
The Armada 37xx PMU supports 4 frequency and VDD levels.

View File

@ -384,7 +384,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
if (set)
reg |= bit;
else
reg &= bit;
reg &= ~bit;
iowrite32(reg, addr);
spin_unlock_irqrestore(&gpio->lock, flags);

View File

@ -116,9 +116,9 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
unsigned long word_mask;
const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
unsigned long port_state;
u8 __iomem ports[] = {
idio16gpio->reg->out0_7, idio16gpio->reg->out8_15,
idio16gpio->reg->in0_7, idio16gpio->reg->in8_15,
void __iomem *ports[] = {
&idio16gpio->reg->out0_7, &idio16gpio->reg->out8_15,
&idio16gpio->reg->in0_7, &idio16gpio->reg->in8_15,
};
/* clear bits array to a clean slate */
@ -143,7 +143,7 @@ static int idio_16_gpio_get_multiple(struct gpio_chip *chip,
}
/* read bits from current gpio port */
port_state = ioread8(ports + i);
port_state = ioread8(ports[i]);
/* store acquired bits at respective bits array offset */
bits[word_index] |= port_state << word_offset;

View File

@ -206,10 +206,10 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
unsigned long word_mask;
const unsigned long port_mask = GENMASK(gpio_reg_size - 1, 0);
unsigned long port_state;
u8 __iomem ports[] = {
idio24gpio->reg->out0_7, idio24gpio->reg->out8_15,
idio24gpio->reg->out16_23, idio24gpio->reg->in0_7,
idio24gpio->reg->in8_15, idio24gpio->reg->in16_23,
void __iomem *ports[] = {
&idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
&idio24gpio->reg->out16_23, &idio24gpio->reg->in0_7,
&idio24gpio->reg->in8_15, &idio24gpio->reg->in16_23,
};
const unsigned long out_mode_mask = BIT(1);
@ -217,7 +217,7 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
bitmap_zero(bits, chip->ngpio);
/* get bits are evaluated a gpio port register at a time */
for (i = 0; i < ARRAY_SIZE(ports); i++) {
for (i = 0; i < ARRAY_SIZE(ports) + 1; i++) {
/* gpio offset in bits array */
bits_offset = i * gpio_reg_size;
@ -236,7 +236,7 @@ static int idio_24_gpio_get_multiple(struct gpio_chip *chip,
/* read bits from current gpio port (port 6 is TTL GPIO) */
if (i < 6)
port_state = ioread8(ports + i);
port_state = ioread8(ports[i]);
else if (ioread8(&idio24gpio->reg->ctl) & out_mode_mask)
port_state = ioread8(&idio24gpio->reg->ttl_out0_7);
else
@ -301,9 +301,9 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip,
const unsigned long port_mask = GENMASK(gpio_reg_size, 0);
unsigned long flags;
unsigned int out_state;
u8 __iomem ports[] = {
idio24gpio->reg->out0_7, idio24gpio->reg->out8_15,
idio24gpio->reg->out16_23
void __iomem *ports[] = {
&idio24gpio->reg->out0_7, &idio24gpio->reg->out8_15,
&idio24gpio->reg->out16_23
};
const unsigned long out_mode_mask = BIT(1);
const unsigned int ttl_offset = 48;
@ -327,9 +327,9 @@ static void idio_24_gpio_set_multiple(struct gpio_chip *chip,
raw_spin_lock_irqsave(&idio24gpio->lock, flags);
/* process output lines */
out_state = ioread8(ports + i) & ~gpio_mask;
out_state = ioread8(ports[i]) & ~gpio_mask;
out_state |= (*bits >> bits_offset) & gpio_mask;
iowrite8(out_state, ports + i);
iowrite8(out_state, ports[i]);
raw_spin_unlock_irqrestore(&idio24gpio->lock, flags);
}

View File

@ -497,7 +497,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
struct gpiohandle_request handlereq;
struct linehandle_state *lh;
struct file *file;
int fd, i, ret;
int fd, i, count = 0, ret;
u32 lflags;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@ -558,6 +558,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_descs;
lh->descs[i] = desc;
count = i;
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@ -628,7 +629,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
out_put_unused_fd:
put_unused_fd(fd);
out_free_descs:
for (; i >= 0; i--)
for (i = 0; i < count; i++)
gpiod_free(lh->descs[i]);
kfree(lh->label);
out_free_lh:
@ -902,7 +903,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
desc = &gdev->descs[offset];
ret = gpiod_request(desc, le->label);
if (ret)
goto out_free_desc;
goto out_free_label;
le->desc = desc;
le->eflags = eflags;

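The linehandle_create change above replaces the backwards cleanup loop with a count of descriptors that were successfully requested, so the error path releases only what was actually acquired. A stand-alone sketch of that partial-acquisition cleanup pattern, with hypothetical acquire()/free() stand-ins for gpiod_request()/gpiod_free():

```c
#include <stdio.h>
#include <stdlib.h>

#define NLINES 4

/* Hypothetical per-line acquisition; fails part-way to exercise cleanup. */
static int *acquire(int i)
{
	if (i == 2)
		return NULL;
	return malloc(sizeof(int));
}

static int acquire_all(void *handles[NLINES])
{
	int i, count = 0;

	for (i = 0; i < NLINES; i++) {
		handles[i] = acquire(i);
		if (!handles[i])
			goto out_free;
		count = i + 1;		/* entries [0, count) are now valid */
	}
	return 0;

out_free:
	for (i = 0; i < count; i++)	/* release only what was acquired */
		free(handles[i]);
	return -1;
}

int main(void)
{
	void *handles[NLINES];
	int i;

	if (acquire_all(handles) == 0)
		for (i = 0; i < NLINES; i++)	/* success: caller owns them all */
			free(handles[i]);
	else
		printf("acquisition failed; partial state was already released\n");
	return 0;
}
```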
View File

@ -419,9 +419,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
if (other) {
signed long r;
r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
r = dma_fence_wait(other, true);
if (r < 0) {
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
if (r != -ERESTARTSYS)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
return r;
}
}

Some files were not shown because too many files have changed in this diff.