Merge branch 'linus' into x86/apic, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 416b0c0faf

@@ -228,7 +228,7 @@ Learning on the device port should be enabled, as well as learning_sync:
 	bridge link set dev DEV learning on self
 	bridge link set dev DEV learning_sync on self

-Learning_sync attribute enables syncing of the learned/forgotton FDB entry to
+Learning_sync attribute enables syncing of the learned/forgotten FDB entry to
 the bridge's FDB. It's possible, but not optimal, to enable learning on the
 device port and on the bridge port, and disable learning_sync.

@@ -245,7 +245,7 @@ the responsibility of the port driver/device to age out these entries. If the
 port device supports ageing, when the FDB entry expires, it will notify the
 driver which in turn will notify the bridge with SWITCHDEV_FDB_DEL. If the
 device does not support ageing, the driver can simulate ageing using a
-garbage collection timer to monitor FBD entries. Expired entries will be
+garbage collection timer to monitor FDB entries. Expired entries will be
 notified to the bridge using SWITCHDEV_FDB_DEL. See rocker driver for
 example of driver running ageing timer.
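The garbage-collection ageing described above is easy to picture with a sketch. The fragment below is hypothetical: struct drv_priv, struct drv_fdb_entry and the scan interval are invented for illustration, and only the SWITCHDEV_FDB_DEL notification named in the text is taken from the document; see the rocker driver for a real implementation.

/*
 * Hypothetical sketch: a delayed work item scans the driver's private
 * FDB copy and reports expired entries to the bridge.
 */
#include <linux/etherdevice.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <net/switchdev.h>

struct drv_fdb_entry {
	struct list_head list;
	unsigned char addr[ETH_ALEN];
	u16 vid;
	unsigned long touched;		/* last time the HW saw this MAC */
};

struct drv_priv {
	struct net_device *netdev;
	struct list_head fdb_list;
	unsigned long ageing_jiffies;	/* entry lifetime */
	struct delayed_work fdb_gc;
};

static void drv_fdb_gc_work(struct work_struct *work)
{
	struct drv_priv *priv = container_of(work, struct drv_priv,
					     fdb_gc.work);
	struct drv_fdb_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, &priv->fdb_list, list) {
		struct switchdev_notifier_fdb_info info = {
			.addr = e->addr,
			.vid = e->vid,
		};

		if (!time_after(jiffies, e->touched + priv->ageing_jiffies))
			continue;

		/* Entry expired: tell the bridge, then drop our copy. */
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL, priv->netdev,
					 &info.info);
		list_del(&e->list);
		kfree(e);
	}

	/* Re-arm; coarse granularity is fine for ageing. */
	schedule_delayed_work(&priv->fdb_gc, HZ);
}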
@@ -35,9 +35,34 @@ Table : Subdirectories in /proc/sys/net
 bpf_jit_enable
 --------------

-This enables Berkeley Packet Filter Just in Time compiler.
-Currently supported on x86_64 architecture, bpf_jit provides a framework
-to speed packet filtering, the one used by tcpdump/libpcap for example.
+This enables the BPF Just in Time (JIT) compiler. BPF is a flexible
+and efficient infrastructure allowing to execute bytecode at various
+hook points. It is used in a number of Linux kernel subsystems such
+as networking (e.g. XDP, tc), tracing (e.g. kprobes, uprobes, tracepoints)
+and security (e.g. seccomp). LLVM has a BPF back end that can compile
+restricted C into a sequence of BPF instructions. After program load
+through bpf(2) and passing a verifier in the kernel, a JIT will then
+translate these BPF proglets into native CPU instructions. There are
+two flavors of JITs, the newer eBPF JIT currently supported on:
+  - x86_64
+  - arm64
+  - ppc64
+  - sparc64
+  - mips64
+  - s390x
+
+And the older cBPF JIT supported on the following archs:
+  - arm
+  - mips
+  - ppc
+  - sparc
+
+eBPF JITs are a superset of cBPF JITs, meaning the kernel will
+migrate cBPF instructions into eBPF instructions and then JIT
+compile them transparently. Older cBPF JITs can only translate
+tcpdump filters, seccomp rules, etc, but not mentioned eBPF
+programs loaded through bpf(2).
+
 Values :
 	0 - disable the JIT (default value)
 	1 - enable the JIT
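As a usage note: the knob lives at /proc/sys/net/core/bpf_jit_enable (that path is real; the helper below is just an illustrative sketch, equivalent to "echo 1 > /proc/sys/net/core/bpf_jit_enable" run as root).

/* Minimal userspace sketch: enable the BPF JIT at runtime. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/proc/sys/net/core/bpf_jit_enable", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}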
@@ -46,9 +71,9 @@ Values :
 bpf_jit_harden
 --------------

-This enables hardening for the Berkeley Packet Filter Just in Time compiler.
-Supported are eBPF JIT backends. Enabling hardening trades off performance,
-but can mitigate JIT spraying.
+This enables hardening for the BPF JIT compiler. Supported are eBPF
+JIT backends. Enabling hardening trades off performance, but can
+mitigate JIT spraying.
 Values :
 	0 - disable JIT hardening (default value)
 	1 - enable JIT hardening for unprivileged users only

@@ -57,11 +82,11 @@ Values :
 bpf_jit_kallsyms
 ----------------

-When Berkeley Packet Filter Just in Time compiler is enabled, then compiled
-images are unknown addresses to the kernel, meaning they neither show up in
-traces nor in /proc/kallsyms. This enables export of these addresses, which
-can be used for debugging/tracing. If bpf_jit_harden is enabled, this feature
-is disabled.
+When BPF JIT compiler is enabled, then compiled images are unknown
+addresses to the kernel, meaning they neither show up in traces nor
+in /proc/kallsyms. This enables export of these addresses, which can
+be used for debugging/tracing. If bpf_jit_harden is enabled, this
+feature is disabled.
 Values :
 	0 - disable JIT kallsyms export (default value)
 	1 - enable JIT kallsyms export for privileged users only
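When the export is on, JITed program images appear in /proc/kallsyms under names of the form bpf_prog_<tag>. A minimal sketch to confirm this (assuming a BPF program is loaded, bpf_jit_enable=1, bpf_jit_kallsyms=1, and the reader runs it as a privileged user):

/* Userspace sketch: list JITed BPF program symbols. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "bpf_prog_"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}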
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 13
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote

 # *DOCUMENTATION*

@@ -396,7 +396,7 @@ LINUXINCLUDE := \
 KBUILD_CPPFLAGS := -D__KERNEL__

 KBUILD_CFLAGS   := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-		   -fno-strict-aliasing -fno-common \
+		   -fno-strict-aliasing -fno-common -fshort-wchar \
		   -Werror-implicit-function-declaration \
		   -Wno-format-security \
		   -std=gnu89 $(call cc-option,-fno-PIE)

@@ -442,7 +442,7 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # ===========================================================================
 # Rules shared between *config targets and build targets

-# Basic helpers built in scripts/
+# Basic helpers built in scripts/basic/
 PHONY += scripts_basic
 scripts_basic:
	$(Q)$(MAKE) $(build)=scripts/basic

@@ -505,7 +505,7 @@ ifeq ($(KBUILD_EXTMOD),)
 endif
 endif
 endif
-# install and module_install need also be processed one by one
+# install and modules_install need also be processed one by one
 ifneq ($(filter install,$(MAKECMDGOALS)),)
 ifneq ($(filter modules_install,$(MAKECMDGOALS)),)
	mixed-targets := 1

@@ -964,7 +964,7 @@ export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y2) $(drivers-y) $(net-y) $(virt-
 export KBUILD_VMLINUX_LIBS := $(libs-y1)
 export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
 export LDFLAGS_vmlinux
-# used by scripts/pacmage/Makefile
+# used by scripts/package/Makefile
 export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Documentation include samples scripts tools)

 vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN) $(KBUILD_VMLINUX_LIBS)

@@ -992,8 +992,8 @@ include/generated/autoksyms.h: FORCE
 ARCH_POSTLINK := $(wildcard $(srctree)/arch/$(SRCARCH)/Makefile.postlink)

 # Final link of vmlinux with optional arch pass after final link
-cmd_link-vmlinux = \
-	$(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \
+cmd_link-vmlinux = \
+	$(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) ; \
	$(if $(ARCH_POSTLINK), $(MAKE) -f $(ARCH_POSTLINK) $@, true)

 vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE

@@ -1184,6 +1184,7 @@ PHONY += kselftest
 kselftest:
	$(Q)$(MAKE) -C tools/testing/selftests run_tests

+PHONY += kselftest-clean
 kselftest-clean:
	$(Q)$(MAKE) -C tools/testing/selftests clean

@@ -96,7 +96,6 @@ menu "ARC Architecture Configuration"

 menu "ARC Platform/SoC/Board"

-source "arch/arc/plat-sim/Kconfig"
 source "arch/arc/plat-tb10x/Kconfig"
 source "arch/arc/plat-axs10x/Kconfig"
 #New platform adds here

@@ -107,7 +107,7 @@ core-y += arch/arc/
 # w/o this dtb won't embed into kernel binary
 core-y += arch/arc/boot/dts/

-core-$(CONFIG_ARC_PLAT_SIM)	+= arch/arc/plat-sim/
+core-y				+= arch/arc/plat-sim/
 core-$(CONFIG_ARC_PLAT_TB10X)	+= arch/arc/plat-tb10x/
 core-$(CONFIG_ARC_PLAT_AXS10X)	+= arch/arc/plat-axs10x/
 core-$(CONFIG_ARC_PLAT_EZNPS)	+= arch/arc/plat-eznps/

@@ -15,15 +15,15 @@

 / {
	compatible = "snps,arc";
-	#address-cells = <1>;
-	#size-cells = <1>;
+	#address-cells = <2>;
+	#size-cells = <2>;

	cpu_card {
		compatible = "simple-bus";
		#address-cells = <1>;
		#size-cells = <1>;

-		ranges = <0x00000000 0xf0000000 0x10000000>;
+		ranges = <0x00000000 0x0 0xf0000000 0x10000000>;

		core_clk: core_clk {
			#clock-cells = <0>;

@@ -91,23 +91,21 @@ arcpct0: pct {
	mb_intc: dw-apb-ictl@0xe0012000 {
		#interrupt-cells = <1>;
		compatible = "snps,dw-apb-ictl";
-		reg = < 0xe0012000 0x200 >;
+		reg = < 0x0 0xe0012000 0x0 0x200 >;
		interrupt-controller;
		interrupt-parent = <&core_intc>;
		interrupts = < 7 >;
	};

	memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges = <0x00000000 0x80000000 0x20000000>;
		device_type = "memory";
-		reg = <0x80000000 0x1b000000>;	/* (512 - 32) MiB */
+		/* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */
+		reg = <0x0 0x80000000 0x0 0x1b000000>;	/* (512 - 32) MiB */
	};

	reserved-memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
+		#address-cells = <2>;
+		#size-cells = <2>;
		ranges;
		/*
		 * We just move frame buffer area to the very end of

@@ -118,7 +116,7 @@ reserved-memory {
		 */
		frame_buffer: frame_buffer@9e000000 {
			compatible = "shared-dma-pool";
-			reg = <0x9e000000 0x2000000>;
+			reg = <0x0 0x9e000000 0x0 0x2000000>;
			no-map;
		};
	};

@@ -14,15 +14,15 @@

 / {
	compatible = "snps,arc";
-	#address-cells = <1>;
-	#size-cells = <1>;
+	#address-cells = <2>;
+	#size-cells = <2>;

	cpu_card {
		compatible = "simple-bus";
		#address-cells = <1>;
		#size-cells = <1>;

-		ranges = <0x00000000 0xf0000000 0x10000000>;
+		ranges = <0x00000000 0x0 0xf0000000 0x10000000>;

		core_clk: core_clk {
			#clock-cells = <0>;

@@ -94,30 +94,29 @@ arcpct0: pct {
	mb_intc: dw-apb-ictl@0xe0012000 {
		#interrupt-cells = <1>;
		compatible = "snps,dw-apb-ictl";
-		reg = < 0xe0012000 0x200 >;
+		reg = < 0x0 0xe0012000 0x0 0x200 >;
		interrupt-controller;
		interrupt-parent = <&core_intc>;
		interrupts = < 24 >;
	};

	memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges = <0x00000000 0x80000000 0x40000000>;
		device_type = "memory";
-		reg = <0x80000000 0x20000000>;	/* 512MiB */
+		/* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */
+		reg = <0x0 0x80000000 0x0 0x20000000	/* 512 MiB low mem */
+		       0x1 0xc0000000 0x0 0x40000000>;	/* 1 GiB highmem */
	};

	reserved-memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
+		#address-cells = <2>;
+		#size-cells = <2>;
		ranges;
		/*
		 * Move frame buffer out of IOC aperture (0x8z-0xAz).
		 */
		frame_buffer: frame_buffer@be000000 {
			compatible = "shared-dma-pool";
-			reg = <0xbe000000 0x2000000>;
+			reg = <0x0 0xbe000000 0x0 0x2000000>;
			no-map;
		};
	};

@@ -14,15 +14,15 @@

 / {
	compatible = "snps,arc";
-	#address-cells = <1>;
-	#size-cells = <1>;
+	#address-cells = <2>;
+	#size-cells = <2>;

	cpu_card {
		compatible = "simple-bus";
		#address-cells = <1>;
		#size-cells = <1>;

-		ranges = <0x00000000 0xf0000000 0x10000000>;
+		ranges = <0x00000000 0x0 0xf0000000 0x10000000>;

		core_clk: core_clk {
			#clock-cells = <0>;

@@ -100,30 +100,29 @@ arcpct0: pct {
	mb_intc: dw-apb-ictl@0xe0012000 {
		#interrupt-cells = <1>;
		compatible = "snps,dw-apb-ictl";
-		reg = < 0xe0012000 0x200 >;
+		reg = < 0x0 0xe0012000 0x0 0x200 >;
		interrupt-controller;
		interrupt-parent = <&idu_intc>;
		interrupts = <0>;
	};

	memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges = <0x00000000 0x80000000 0x40000000>;
		device_type = "memory";
-		reg = <0x80000000 0x20000000>;	/* 512MiB */
+		/* CONFIG_KERNEL_RAM_BASE_ADDRESS needs to match low mem start */
+		reg = <0x0 0x80000000 0x0 0x20000000	/* 512 MiB low mem */
+		       0x1 0xc0000000 0x0 0x40000000>;	/* 1 GiB highmem */
	};

	reserved-memory {
-		#address-cells = <1>;
-		#size-cells = <1>;
+		#address-cells = <2>;
+		#size-cells = <2>;
		ranges;
		/*
		 * Move frame buffer out of IOC aperture (0x8z-0xAz).
		 */
		frame_buffer: frame_buffer@be000000 {
			compatible = "shared-dma-pool";
-			reg = <0xbe000000 0x2000000>;
+			reg = <0x0 0xbe000000 0x0 0x2000000>;
			no-map;
		};
	};

@@ -13,7 +13,7 @@ axs10x_mb {
		compatible = "simple-bus";
		#address-cells = <1>;
		#size-cells = <1>;
-		ranges = <0x00000000 0xe0000000 0x10000000>;
+		ranges = <0x00000000 0x0 0xe0000000 0x10000000>;
		interrupt-parent = <&mb_intc>;

		i2sclk: i2sclk@100a0 {

@@ -21,7 +21,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs"
 CONFIG_PREEMPT=y

@@ -23,7 +23,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 CONFIG_ARC_BUILTIN_DTB_NAME="haps_hs_idu"

@@ -39,7 +39,6 @@ CONFIG_IP_PNP=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set

@@ -23,7 +23,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700"
 CONFIG_PREEMPT=y
 # CONFIG_COMPACTION is not set

@@ -26,7 +26,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs"
 CONFIG_PREEMPT=y

@@ -24,7 +24,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu"

@@ -23,7 +23,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
 # CONFIG_COMPACTION is not set
 CONFIG_NET=y

@@ -23,7 +23,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs"
 # CONFIG_COMPACTION is not set

@@ -18,7 +18,6 @@ CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
 # CONFIG_IOSCHED_DEADLINE is not set
 # CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARC_PLAT_SIM=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 # CONFIG_ARC_TIMERS_64BIT is not set

@@ -38,7 +38,6 @@ CONFIG_IP_MULTICAST=y
 # CONFIG_INET_XFRM_MODE_TRANSPORT is not set
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
 # CONFIG_INET_DIAG is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set

@@ -96,7 +96,9 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_SLC_FLUSH	0x904
 #define ARC_REG_SLC_INVALIDATE	0x905
 #define ARC_REG_SLC_RGN_START	0x914
+#define ARC_REG_SLC_RGN_START1	0x915
 #define ARC_REG_SLC_RGN_END	0x916
+#define ARC_REG_SLC_RGN_END1	0x917

 /* Bit val in SLC_CONTROL */
 #define SLC_CTRL_DIS		0x001

@@ -94,6 +94,8 @@ static inline int is_pae40_enabled(void)
	return IS_ENABLED(CONFIG_ARC_HAS_PAE40);
 }

+extern int pae40_exist_but_not_enab(void);
+
 #endif	/* !__ASSEMBLY__ */

 #endif

@@ -75,10 +75,20 @@ void arc_init_IRQ(void)
	 * Set a default priority for all available interrupts to prevent
	 * switching of register banks if Fast IRQ and multiple register banks
	 * are supported by CPU.
+	 * Also disable private-per-core IRQ lines so faulty external HW won't
+	 * trigger interrupt that kernel is not ready to handle.
	 */
	for (i = NR_EXCEPTIONS; i < irq_bcr.irqs + NR_EXCEPTIONS; i++) {
		write_aux_reg(AUX_IRQ_SELECT, i);
		write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+
+		/*
+		 * Only mask cpu private IRQs here.
+		 * "common" interrupts are masked at IDU, otherwise it would
+		 * need to be unmasked at each cpu, with IPIs
+		 */
+		if (i < FIRST_EXT_IRQ)
+			write_aux_reg(AUX_IRQ_ENABLE, 0);
	}

	/* setup status32, don't enable intr yet as kernel doesn't want */

@@ -27,7 +27,7 @@
 */
 void arc_init_IRQ(void)
 {
-	int level_mask = 0;
+	unsigned int level_mask = 0, i;

	/* Is timer high priority Interrupt (Level2 in ARCompact jargon) */
	level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ;

@@ -40,6 +40,18 @@ void arc_init_IRQ(void)

	if (level_mask)
		pr_info("Level-2 interrupts bitset %x\n", level_mask);
+
+	/*
+	 * Disable all IRQ lines so faulty external hardware won't
+	 * trigger interrupt that kernel is not ready to handle.
+	 */
+	for (i = TIMER0_IRQ; i < NR_CPU_IRQS; i++) {
+		unsigned int ienb;
+
+		ienb = read_aux_reg(AUX_IENABLE);
+		ienb &= ~(1 << i);
+		write_aux_reg(AUX_IENABLE, ienb);
+	}
 }

 /*

@@ -665,6 +665,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;
+	phys_addr_t end;

	spin_lock_irqsave(&lock, flags);

@@ -694,8 +695,19 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
-	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
-	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
+	end = paddr + sz + l2_line_sz - 1;
+	if (is_pae40_enabled())
+		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
+
+	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
+
+	if (is_pae40_enabled())
+		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
+
+	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* Make sure "busy" bit reports correct stataus, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

@@ -1111,6 +1123,13 @@ noinline void __init arc_ioc_setup(void)
	__dc_enable();
 }

+/*
+ * Cache related boot time checks/setups only needed on master CPU:
+ *  - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
+ *    Assume SMP only, so all cores will have same cache config. A check on
+ *    one core suffices for all
+ *  - IOC setup / dma callbacks only need to be done once
+ */
 void __init arc_cache_init_master(void)
 {
	unsigned int __maybe_unused cpu = smp_processor_id();

@@ -1190,12 +1209,27 @@ void __ref arc_cache_init(void)

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

-	/*
-	 * Only master CPU needs to execute rest of function:
-	 *  - Assume SMP so all cores will have same cache config so
-	 *    any geomtry checks will be same for all
-	 *  - IOC setup / dma callbacks only need to be setup once
-	 */
	if (!cpu)
		arc_cache_init_master();
+
+	/*
+	 * In PAE regime, TLB and cache maintenance ops take wider addresses
+	 * And even if PAE is not enabled in kernel, the upper 32-bits still need
+	 * to be zeroed to keep the ops sane.
+	 * As an optimization for more common !PAE enabled case, zero them out
+	 * once at init, rather than checking/setting to 0 for every runtime op
+	 */
+	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
+
+		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
+			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
+
+		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
+			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
+
+		if (l2_line_sz) {
+			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
+			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
+		}
+	}
 }

@@ -153,6 +153,19 @@ static void _dma_cache_sync(phys_addr_t paddr, size_t size,
	}
 }

+/*
+ * arc_dma_map_page - map a portion of a page for streaming DMA
+ *
+ * Ensure that any data held in the cache is appropriately discarded
+ * or written back.
+ *
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page().
+ *
+ * Note: while it takes struct page as arg, caller can "abuse" it to pass
+ * a region larger than PAGE_SIZE, provided it is physically contiguous
+ * and this still works correctly
+ */
 static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)

@@ -165,6 +178,24 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
	return plat_phys_to_dma(dev, paddr);
 }

+/*
+ * arc_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+ *
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ *
+ * Note: historically this routine was not implemented for ARC
+ */
+static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
+			       size_t size, enum dma_data_direction dir,
+			       unsigned long attrs)
+{
+	phys_addr_t paddr = plat_dma_to_phys(dev, handle);
+
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		_dma_cache_sync(paddr, size, dir);
+}
+
 static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction dir, unsigned long attrs)
 {

@@ -178,6 +209,18 @@ static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
	return nents;
 }

+static void arc_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+			     int nents, enum dma_data_direction dir,
+			     unsigned long attrs)
+{
+	struct scatterlist *s;
+	int i;
+
+	for_each_sg(sg, s, nents, i)
+		arc_dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir,
+				   attrs);
+}
+
 static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {

@@ -223,7 +266,9 @@ const struct dma_map_ops arc_dma_ops = {
	.free			= arc_dma_free,
	.mmap			= arc_dma_mmap,
	.map_page		= arc_dma_map_page,
+	.unmap_page		= arc_dma_unmap_page,
	.map_sg			= arc_dma_map_sg,
+	.unmap_sg		= arc_dma_unmap_sg,
	.sync_single_for_device	= arc_dma_sync_single_for_device,
	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,

@@ -104,6 +104,8 @@
 /* A copy of the ASID from the PID reg is kept in asid_cache */
 DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

+static int __read_mostly pae_exists;
+
 /*
  * Utility Routine to erase a J-TLB entry
  * Caller needs to setup Index Reg (manually or via getIndex)

@@ -784,7 +786,7 @@ void read_decode_mmu_bcr(void)
		mmu->u_dtlb = mmu4->u_dtlb * 4;
		mmu->u_itlb = mmu4->u_itlb * 4;
		mmu->sasid = mmu4->sasid;
-		mmu->pae = mmu4->pae;
+		pae_exists = mmu->pae = mmu4->pae;
	}
 }

@@ -809,6 +811,11 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
	return buf;
 }

+int pae40_exist_but_not_enab(void)
+{
+	return pae_exists && !is_pae40_enabled();
+}
+
 void arc_mmu_init(void)
 {
	char str[256];

@@ -859,6 +866,9 @@ void arc_mmu_init(void)
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
 #endif
+
+	if (pae40_exist_but_not_enab())
+		write_aux_reg(ARC_REG_TLBPD1HI, 0);
 }

 /*

@@ -1,13 +0,0 @@
-#
-# Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2 as
-# published by the Free Software Foundation.
-#
-
-menuconfig ARC_PLAT_SIM
-	bool "ARC nSIM based simulation virtual platforms"
-	help
-	  Support for nSIM based ARC simulation platforms
-	  This includes the standalone nSIM (uart only) vs. System C OSCI VP
@@ -20,11 +20,14 @@
 */

 static const char *simulation_compat[] __initconst = {
+#ifdef CONFIG_ISA_ARCOMPACT
	"snps,nsim",
-	"snps,nsim_hs",
	"snps,nsimosci",
+#else
+	"snps,nsim_hs",
	"snps,nsimosci_hs",
	"snps,zebu_hs",
+#endif
	NULL,
 };

@@ -266,6 +266,7 @@ &hdmi {

 &hdmicec {
	status = "okay";
+	needs-hpd;
 };

 &hsi2c_4 {

@@ -1,7 +1,7 @@
 menuconfig ARCH_AT91
	bool "Atmel SoCs"
	depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7 || ARM_SINGLE_ARMV7M
-	select ARM_CPU_SUSPEND if PM
+	select ARM_CPU_SUSPEND if PM && ARCH_MULTI_V7
	select COMMON_CLK_AT91
	select GPIOLIB
	select PINCTRL

@@ -608,6 +608,9 @@ static void __init at91_pm_init(void (*pm_idle)(void))

 void __init at91rm9200_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
+		return;
+
	at91_dt_ramc();

	/*

@@ -620,18 +623,27 @@ void __init at91rm9200_pm_init(void)

 void __init at91sam9_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
+		return;
+
	at91_dt_ramc();
	at91_pm_init(at91sam9_idle);
 }

 void __init sama5_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
+		return;
+
	at91_dt_ramc();
	at91_pm_init(NULL);
 }

 void __init sama5d2_pm_init(void)
 {
+	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
+		return;
+
	at91_pm_backup_init();
	sama5_pm_init();
 }

@@ -161,9 +161,11 @@ void fpsimd_flush_thread(void)
 {
	if (!system_supports_fpsimd())
		return;
+	preempt_disable();
	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
	fpsimd_flush_task_state(current);
	set_thread_flag(TIF_FOREIGN_FPSTATE);
+	preempt_enable();
 }

 /*

@@ -354,7 +354,6 @@ __primary_switched:
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	mov	x0, x21				// pass FDT address in x0
-	mov	x1, x23				// pass modulo offset in x1
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset

@@ -75,7 +75,7 @@ extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
 * containing function pointers) to be reinitialized, and zero-initialized
 * .bss variables will be reset to 0.
 */
-u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
+u64 __init kaslr_early_init(u64 dt_phys)
 {
	void *fdt;
	u64 seed, offset, mask, module_range;

@@ -131,15 +131,17 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset)
	/*
	 * The kernel Image should not extend across a 1GB/32MB/512MB alignment
	 * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this
-	 * happens, increase the KASLR offset by the size of the kernel image
-	 * rounded up by SWAPPER_BLOCK_SIZE.
+	 * happens, round down the KASLR offset by (1 << SWAPPER_TABLE_SHIFT).
+	 *
+	 * NOTE: The references to _text and _end below will already take the
+	 *       modulo offset (the physical displacement modulo 2 MB) into
+	 *       account, given that the physical placement is controlled by
+	 *       the loader, and will not change as a result of the virtual
+	 *       mapping we choose.
	 */
-	if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) !=
-	    (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) {
-		u64 kimg_sz = _end - _text;
-		offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE))
-				& mask;
-	}
+	if ((((u64)_text + offset) >> SWAPPER_TABLE_SHIFT) !=
+	    (((u64)_end + offset) >> SWAPPER_TABLE_SHIFT))
+		offset = round_down(offset, 1 << SWAPPER_TABLE_SHIFT);

	if (IS_ENABLED(CONFIG_KASAN))
		/*

|
|||
* the mmap_sem because it would already be released
|
||||
* in __lock_page_or_retry in mm/filemap.c.
|
||||
*/
|
||||
if (fatal_signal_pending(current))
|
||||
if (fatal_signal_pending(current)) {
|
||||
if (!user_mode(regs))
|
||||
goto no_context;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
|
||||
|
|
|
@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6455=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -25,7 +24,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set

@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6457=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -26,7 +25,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set

@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6472=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set

@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6474=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set

@@ -1,5 +1,4 @@
 CONFIG_SOC_TMS320C6678=y
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_SPARSE_IRQ=y
@@ -27,7 +26,6 @@ CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_COUNT=2
 CONFIG_BLK_DEV_RAM_SIZE=17000
-CONFIG_MISC_DEVICES=y
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set

@@ -208,14 +208,14 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
-		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
+		pr_err("%pOF: Could not alloc PIC structure.\n", np);
		return NULL;
	}

	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
					     &megamod_domain_ops, pic);
	if (!pic->irqhost) {
-		pr_err("%s: Could not alloc host.\n", np->full_name);
+		pr_err("%pOF: Could not alloc host.\n", np);
		goto error_free;
	}

@@ -225,7 +225,7 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
-		pr_err("%s: Could not map registers.\n", np->full_name);
+		pr_err("%pOF: Could not map registers.\n", np);
		goto error_free;
	}

@@ -253,8 +253,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)

		irq_data = irq_get_irq_data(irq);
		if (!irq_data) {
-			pr_err("%s: combiner-%d no irq_data for virq %d!\n",
-			       np->full_name, i, irq);
+			pr_err("%pOF: combiner-%d no irq_data for virq %d!\n",
+			       np, i, irq);
			continue;
		}

@@ -265,16 +265,16 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
		 * of the core priority interrupts (4 - 15).
		 */
		if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
-			pr_err("%s: combiner-%d core irq %ld out of range!\n",
-			       np->full_name, i, hwirq);
+			pr_err("%pOF: combiner-%d core irq %ld out of range!\n",
+			       np, i, hwirq);
			continue;
		}

		/* record the mapping */
		mapping[hwirq - 4] = i;

-		pr_debug("%s: combiner-%d cascading to hwirq %ld\n",
-			 np->full_name, i, hwirq);
+		pr_debug("%pOF: combiner-%d cascading to hwirq %ld\n",
+			 np, i, hwirq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

@@ -290,8 +290,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
-			pr_debug("%s: setting mux %d to priority %d\n",
-				 np->full_name, mapping[i], i + 4);
+			pr_debug("%pOF: setting mux %d to priority %d\n",
+				 np, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

@@ -436,8 +436,8 @@ void __init c64x_setup_clocks(void)

	err = of_property_read_u32(node, "clock-frequency", &val);
	if (err || val == 0) {
-		pr_err("%s: no clock-frequency found! Using %dMHz\n",
-		       node->full_name, (int)val / 1000000);
+		pr_err("%pOF: no clock-frequency found! Using %dMHz\n",
+		       node, (int)val / 1000000);
		val = 25000000;
	}
	clkin1.rate = val;

@@ -204,14 +204,14 @@ void __init timer64_init(void)

	timer = of_iomap(np, 0);
	if (!timer) {
-		pr_debug("%s: Cannot map timer registers.\n", np->full_name);
+		pr_debug("%pOF: Cannot map timer registers.\n", np);
		goto out;
	}
-	pr_debug("%s: Timer registers=%p.\n", np->full_name, timer);
+	pr_debug("%pOF: Timer registers=%p.\n", np, timer);

	cd->irq = irq_of_parse_and_map(np, 0);
	if (cd->irq == NO_IRQ) {
-		pr_debug("%s: Cannot find interrupt.\n", np->full_name);
+		pr_debug("%pOF: Cannot find interrupt.\n", np);
		iounmap(timer);
		goto out;
	}

@@ -229,7 +229,7 @@ void __init timer64_init(void)
		dscr_set_devstate(timer64_devstate_id, DSCR_DEVSTATE_ENABLED);
	}

-	pr_debug("%s: Timer irq=%d.\n", np->full_name, cd->irq);
+	pr_debug("%pOF: Timer irq=%d.\n", np, cd->irq);

	clockevents_calc_mult_shift(cd, c6x_core_freq / TIMER_DIVISOR, 5);

@@ -90,6 +90,24 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
	/* Mark this context has been used on the new CPU */
	if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
		cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+
+		/*
+		 * This full barrier orders the store to the cpumask above vs
+		 * a subsequent operation which allows this CPU to begin loading
+		 * translations for next.
+		 *
+		 * When using the radix MMU that operation is the load of the
+		 * MMU context id, which is then moved to SPRN_PID.
+		 *
+		 * For the hash MMU it is either the first load from slb_cache
+		 * in switch_slb(), and/or the store of paca->mm_ctx_id in
+		 * copy_mm_to_paca().
+		 *
+		 * On the read side the barrier is in pte_xchg(), which orders
+		 * the store to the PTE vs the load of mm_cpumask.
+		 */
+		smp_mb();
+
		new_on_cpu = true;
	}

@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
	unsigned long *p = (unsigned long *)ptep;
	__be64 prev;

+	/* See comment in switch_mm_irqs_off() */
	prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
					     (__force unsigned long)pte_raw(new));

@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 {
	unsigned long *p = (unsigned long *)ptep;

+	/* See comment in switch_mm_irqs_off() */
	return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
 }
 #endif

@@ -294,32 +294,26 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
 {
	struct kvmppc_spapr_tce_table *stt = NULL;
+	struct kvmppc_spapr_tce_table *siter;
	unsigned long npages, size;
	int ret = -ENOMEM;
	int i;
+	int fd = -1;

	if (!args->size)
		return -EINVAL;

-	/* Check this LIOBN hasn't been previously allocated */
-	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
-		if (stt->liobn == args->liobn)
-			return -EBUSY;
-	}
-
	size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
	npages = kvmppc_tce_pages(size);
	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
-	if (ret) {
-		stt = NULL;
-		goto fail;
-	}
+	if (ret)
+		return ret;

+	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
-		goto fail;
+		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;

@@ -334,24 +328,42 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
			goto fail;
	}

-	kvm_get_kvm(kvm);
+	ret = fd = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+				    stt, O_RDWR | O_CLOEXEC);
+	if (ret < 0)
+		goto fail;

	mutex_lock(&kvm->lock);
-	list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
+
+	/* Check this LIOBN hasn't been previously allocated */
+	ret = 0;
+	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
+		if (siter->liobn == args->liobn) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+
+	if (!ret) {
+		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
+		kvm_get_kvm(kvm);
+	}

	mutex_unlock(&kvm->lock);

-	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
-				stt, O_RDWR | O_CLOEXEC);
+	if (!ret)
+		return fd;

-fail:
-	if (stt) {
-		for (i = 0; i < npages; i++)
-			if (stt->pages[i])
-				__free_page(stt->pages[i]);
+	put_unused_fd(fd);

-		kfree(stt);
-	}
+fail:
+	for (i = 0; i < npages; i++)
+		if (stt->pages[i])
+			__free_page(stt->pages[i]);
+
+	kfree(stt);
+fail_acct:
+	kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
	return ret;
 }

@@ -1291,6 +1291,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
+BEGIN_FTR_SECTION
+	PPC_MSGSYNC
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f

@@ -16,7 +16,22 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
	u8 cppr;
	u16 ack;

-	/* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */
+	/*
+	 * Ensure any previous store to CPPR is ordered vs.
+	 * the subsequent loads from PIPR or ACK.
+	 */
+	eieio();
+
+	/*
+	 * DD1 bug workaround: If PIPR is less favored than CPPR
+	 * ignore the interrupt or we might incorrectly lose an IPB
+	 * bit.
+	 */
+	if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
+		u8 pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
+		if (pipr >= xc->hw_cppr)
+			return;
+	}

	/* Perform the acknowledge OS to register cycle. */
	ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));

@@ -235,6 +250,11 @@ static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
	/*
	 * If we found an interrupt, adjust what the guest CPPR should
	 * be as if we had just fetched that interrupt from HW.
+	 *
+	 * Note: This can only make xc->cppr smaller as the previous
+	 * loop will only exit with hirq != 0 if prio is lower than
+	 * the current xc->cppr. Thus we don't need to re-check xc->mfrr
+	 * for pending IPIs.
	 */
	if (hirq)
		xc->cppr = prio;

@@ -380,6 +400,12 @@ X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
	old_cppr = xc->cppr;
	xc->cppr = cppr;

+	/*
+	 * Order the above update of xc->cppr with the subsequent
+	 * read of xc->mfrr inside push_pending_to_hw()
+	 */
+	smp_mb();
+
	/*
	 * We are masking less, we need to look for pending things
	 * to deliver and set VP pending bits accordingly to trigger

@@ -420,21 +446,37 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
	 * used to signal MFRR changes is EOId when fetched from
	 * the queue.
	 */
-	if (irq == XICS_IPI || irq == 0)
+	if (irq == XICS_IPI || irq == 0) {
+		/*
+		 * This barrier orders the setting of xc->cppr vs.
+		 * subsquent test of xc->mfrr done inside
+		 * scan_interrupts and push_pending_to_hw
+		 */
+		smp_mb();
		goto bail;
+	}

	/* Find interrupt source */
	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel(" source not found !\n");
		rc = H_PARAMETER;
+		/* Same as above */
+		smp_mb();
		goto bail;
	}
	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	state->in_eoi = true;
-	mb();
+
+	/*
+	 * This barrier orders both setting of in_eoi above vs,
+	 * subsequent test of guest_priority, and the setting
+	 * of xc->cppr vs. subsquent test of xc->mfrr done inside
+	 * scan_interrupts and push_pending_to_hw
+	 */
+	smp_mb();

 again:
	if (state->guest_priority == MASKED) {

@@ -461,6 +503,14 @@ X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)

	}

+	/*
+	 * This barrier orders the above guest_priority check
+	 * and spin_lock/unlock with clearing in_eoi below.
+	 *
+	 * It also has to be a full mb() as it must ensure
+	 * the MMIOs done in source_eoi() are completed before
+	 * state->in_eoi is visible.
+	 */
	mb();
	state->in_eoi = false;
 bail:

@@ -495,6 +545,18 @@ X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
	/* Locklessly write over MFRR */
	xc->mfrr = mfrr;

+	/*
+	 * The load of xc->cppr below and the subsequent MMIO store
+	 * to the IPI must happen after the above mfrr update is
+	 * globally visible so that:
+	 *
+	 * - Synchronize with another CPU doing an H_EOI or a H_CPPR
+	 *   updating xc->cppr then reading xc->mfrr.
+	 *
+	 * - The target of the IPI sees the xc->mfrr update
+	 */
+	mb();
+
	/* Shoot the IPI if most favored than target cppr */
	if (mfrr < xc->cppr)
		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
		"srl %[cc],28\n"
		: [cc] "=d" (cc)
		: [code] "d" (code), [addr] "a" (addr)
-		: "memory", "cc");
+		: "3", "memory", "cc");
	return cc;
 }

@@ -425,7 +425,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
	VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
	trace_kvm_s390_handle_sthyi(vcpu, code, addr);

-	if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
+	if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (code & 0xffff) {

@@ -433,6 +433,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
		goto out;
	}

+	if (addr & ~PAGE_MASK)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
	/*
	 * If the page has not yet been faulted in, we want to do that
	 * now and not after all the expensive calculations.

@@ -68,6 +68,7 @@ typedef struct { unsigned long iopgprot; } iopgprot_t;
 #define iopgprot_val(x)	((x).iopgprot)

 #define __pte(x)	((pte_t) { (x) } )
+#define __pmd(x)	((pmd_t) { { (x) }, })
 #define __iopte(x)	((iopte_t) { (x) } )
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __ctxd(x)	((ctxd_t) { (x) } )

@@ -95,6 +96,7 @@
 #define iopgprot_val(x)	(x)

 #define __pte(x)	(x)
+#define __pmd(x)	((pmd_t) { { (x) }, })
 #define __iopte(x)	(x)
 #define __pgd(x)	(x)
 #define __ctxd(x)	(x)

@@ -1266,8 +1266,6 @@ static int pci_sun4v_probe(struct platform_device *op)
			 * ATU group, but ATU hcalls won't be available.
			 */
			hv_atu = false;
-			pr_err(PFX "Could not register hvapi ATU err=%d\n",
-			       err);
		} else {
			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
				vatu_major, vatu_minor);

@@ -602,7 +602,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 {
	struct pci_dev *dev;
	int i, has_io, has_mem;
-	unsigned int cmd;
+	unsigned int cmd = 0;
	struct linux_pcic *pcic;
	/* struct linux_pbm_info* pbm = &pcic->pbm; */
	int node;

@@ -5,26 +5,26 @@
	.align	4
 ENTRY(__multi3) /* %o0 = u, %o1 = v */
	mov	%o1, %g1
-	srl	%o3, 0, %g4
-	mulx	%g4, %g1, %o1
+	srl	%o3, 0, %o4
+	mulx	%o4, %g1, %o1
	srlx	%g1, 0x20, %g3
-	mulx	%g3, %g4, %g5
-	sllx	%g5, 0x20, %o5
-	srl	%g1, 0, %g4
+	mulx	%g3, %o4, %g7
+	sllx	%g7, 0x20, %o5
+	srl	%g1, 0, %o4
	sub	%o1, %o5, %o5
	srlx	%o5, 0x20, %o5
-	addcc	%g5, %o5, %g5
+	addcc	%g7, %o5, %g7
	srlx	%o3, 0x20, %o5
-	mulx	%g4, %o5, %g4
+	mulx	%o4, %o5, %o4
	mulx	%g3, %o5, %o5
	sethi	%hi(0x80000000), %g3
-	addcc	%g5, %g4, %g5
-	srlx	%g5, 0x20, %g5
+	addcc	%g7, %o4, %g7
+	srlx	%g7, 0x20, %g7
	add	%g3, %g3, %g3
	movcc	%xcc, %g0, %g3
-	addcc	%o5, %g5, %o5
-	sllx	%g4, 0x20, %g4
-	add	%o1, %g4, %o1
+	addcc	%o5, %g7, %o5
+	sllx	%o4, 0x20, %o4
+	add	%o1, %o4, %o1
	add	%o5, %g3, %g2
	mulx	%g1, %o2, %g1
	add	%g1, %g2, %g1

@@ -450,10 +450,10 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
	return 0;
 }

-static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
+static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
 {
	if (use_xsave()) {
-		copy_kernel_to_xregs(&fpstate->xsave, -1);
+		copy_kernel_to_xregs(&fpstate->xsave, mask);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);

@@ -477,7 +477,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
			: : [addr] "m" (fpstate));
	}

-	__copy_kernel_to_fpregs(fpstate);
+	__copy_kernel_to_fpregs(fpstate, -1);
 }

 extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

@@ -492,6 +492,7 @@
	unsigned long cr4;
	unsigned long cr4_guest_owned_bits;
	unsigned long cr8;
+	u32 pkru;
	u32 hflags;
	u64 efer;
	u64 apic_base;

@@ -140,9 +140,7 @@ static inline int init_new_context(struct task_struct *tsk,
		mm->context.execute_only_pkey = -1;
	}
	#endif
-	init_new_context_ldt(tsk, mm);
-
-	return 0;
+	return init_new_context_ldt(tsk, mm);
 }
 static inline void destroy_context(struct mm_struct *mm)
 {

@@ -469,7 +469,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
		entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_7_ECX);
		/* PKU is not yet implemented for shadow paging. */
-		if (!tdp_enabled)
+		if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
			entry->ecx &= ~F(PKU);
		entry->edx &= kvm_cpuid_7_0_edx_x86_features;
		entry->edx &= get_scattered_cpuid_leaf(7, 0, CPUID_EDX);

@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
 }

-static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
-{
-	return kvm_x86_ops->get_pkru(vcpu);
-}
-
 static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
 {
	vcpu->arch.hflags |= HF_GUEST_MASK;

|
@ -185,7 +185,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
|
|||
* index of the protection domain, so pte_pkey * 2 is
|
||||
* is the index of the first bit for the domain.
|
||||
*/
|
||||
pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
|
||||
pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
|
||||
|
||||
/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
|
||||
offset = (pfec & ~1) +
|
||||
|
|
|
@@ -1777,11 +1777,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
	to_svm(vcpu)->vmcb->save.rflags = rflags;
 }

-static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
	switch (reg) {

@@ -5413,8 +5408,6 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

-	.get_pkru = svm_get_pkru,
-
	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,

@@ -636,8 +636,6 @@ struct vcpu_vmx {

	u64 current_tsc_ratio;

-	bool guest_pkru_valid;
-	u32 guest_pkru;
	u32 host_pkru;

	/*

@@ -2383,11 +2381,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
	to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
 }

-static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
-{
-	return to_vmx(vcpu)->guest_pkru;
-}
-
 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
	u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);

@@ -9020,8 +9013,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vmx_set_interrupt_shadow(vcpu, 0);

-	if (vmx->guest_pkru_valid)
-		__write_pkru(vmx->guest_pkru);
+	if (static_cpu_has(X86_FEATURE_PKU) &&
+	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
+	    vcpu->arch.pkru != vmx->host_pkru)
+		__write_pkru(vcpu->arch.pkru);

	atomic_switch_perf_msrs(vmx);
	debugctlmsr = get_debugctlmsr();

@@ -9169,13 +9164,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
	 * back on host, so it is safe to read guest PKRU from current
	 * XSAVE.
	 */
-	if (boot_cpu_has(X86_FEATURE_OSPKE)) {
-		vmx->guest_pkru = __read_pkru();
-		if (vmx->guest_pkru != vmx->host_pkru) {
-			vmx->guest_pkru_valid = true;
+	if (static_cpu_has(X86_FEATURE_PKU) &&
+	    kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
+		vcpu->arch.pkru = __read_pkru();
+		if (vcpu->arch.pkru != vmx->host_pkru)
			__write_pkru(vmx->host_pkru);
-		} else
-			vmx->guest_pkru_valid = false;
	}

	/*

@@ -11682,8 +11675,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
	.get_rflags = vmx_get_rflags,
	.set_rflags = vmx_set_rflags,

-	.get_pkru = vmx_get_pkru,
-
	.tlb_flush = vmx_flush_tlb,

	.run = vmx_vcpu_run,

@@ -3245,7 +3245,12 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
			u32 size, offset, ecx, edx;
			cpuid_count(XSTATE_CPUID, index,
				    &size, &offset, &ecx, &edx);
-			memcpy(dest + offset, src, size);
+			if (feature == XFEATURE_MASK_PKRU)
+				memcpy(dest + offset, &vcpu->arch.pkru,
+				       sizeof(vcpu->arch.pkru));
+			else
+				memcpy(dest + offset, src, size);
		}

		valid -= feature;

@@ -3283,7 +3288,11 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
			u32 size, offset, ecx, edx;
			cpuid_count(XSTATE_CPUID, index,
				    &size, &offset, &ecx, &edx);
-			memcpy(dest, src + offset, size);
+			if (feature == XFEATURE_MASK_PKRU)
+				memcpy(&vcpu->arch.pkru, src + offset,
+				       sizeof(vcpu->arch.pkru));
+			else
+				memcpy(dest, src + offset, size);
		}

		valid -= feature;

@@ -7633,7 +7642,9 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
	 */
	vcpu->guest_fpu_loaded = 1;
	__kernel_fpu_begin();
-	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
+	/* PKRU is separately restored in kvm_x86_ops->run. */
+	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+				~XFEATURE_MASK_PKRU);
	trace_kvm_fpu(1);
 }

@@ -75,6 +75,8 @@ static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
+	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
+	QUEUE_FLAG_NAME(QUIESCED),
 };
 #undef QUEUE_FLAG_NAME

@@ -265,6 +267,7 @@ static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
+	CMD_FLAG_NAME(NOWAIT),
 };
 #undef CMD_FLAG_NAME

@@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 	}						\
 } while (0)
 
+static inline unsigned int throtl_bio_data_size(struct bio *bio)
+{
+	/* assume it's one sector */
+	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+		return 512;
+	return bio->bi_iter.bi_size;
+}
+
 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 {
 	INIT_LIST_HEAD(&qn->node);
@@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	bool rw = bio_data_dir(bio);
 	u64 bytes_allowed, extra_bytes, tmp;
 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+	unsigned int bio_size = throtl_bio_data_size(bio);
 
 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
@@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	do_div(tmp, HZ);
 	bytes_allowed = tmp;
 
-	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
+	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
 		if (wait)
 			*wait = 0;
 		return true;
 	}
 
 	/* Calc approx time to dispatch */
-	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
+	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
 	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
 
 	if (!jiffy_wait)
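The bio_size variable introduced above feeds the wait-time estimate computed here. A small standalone sketch of that arithmetic (HZ assumed to be 1000 for the example) shows why charging a discard at its full byte size would be punitive:

    /*
     * Editorial sketch, not kernel code: the dispatch-wait math from
     * tg_with_in_bps_limit(), evaluated for a large discard with and
     * without the one-sector cap from throtl_bio_data_size().
     */
    #include <stdio.h>
    #include <stdint.h>

    #define HZ 1000  /* jiffies per second, assumed for the example */

    static uint64_t wait_jiffies(uint64_t extra_bytes, uint64_t bps_limit)
    {
        return extra_bytes * HZ / bps_limit;  /* mirrors div64_u64() */
    }

    int main(void)
    {
        uint64_t limit = 1 << 20;  /* 1 MiB/s group limit */

        printf("4 GiB discard, full size: %llu jiffies\n",
               (unsigned long long)wait_jiffies(4ULL << 30, limit));
        printf("4 GiB discard, capped:    %llu jiffies\n",
               (unsigned long long)wait_jiffies(512, limit));
        return 0;  /* 4096000 jiffies (~68 min) vs. 0 jiffies */
    }

A discard moves no data through the throttled path, so billing it as one 512-byte sector keeps the computed delay proportional to real work.
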
@@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
+	unsigned int bio_size = throtl_bio_data_size(bio);
 
 	/* Charge the bio to the group */
-	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
+	tg->bytes_disp[rw] += bio_size;
 	tg->io_disp[rw]++;
-	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
+	tg->last_bytes_disp[rw] += bio_size;
 	tg->last_io_disp[rw]++;
 
 	/*

@@ -29,26 +29,25 @@
 #include <scsi/scsi_cmnd.h>
 
 /**
- * bsg_destroy_job - routine to teardown/delete a bsg job
+ * bsg_teardown_job - routine to teardown a bsg job
  * @job: bsg_job that is to be torn down
  */
-static void bsg_destroy_job(struct kref *kref)
+static void bsg_teardown_job(struct kref *kref)
 {
 	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
 	struct request *rq = job->req;
 
-	blk_end_request_all(rq, BLK_STS_OK);
-
 	put_device(job->dev);	/* release reference for the request */
 
 	kfree(job->request_payload.sg_list);
 	kfree(job->reply_payload.sg_list);
-	kfree(job);
+
+	blk_end_request_all(rq, BLK_STS_OK);
 }
 
 void bsg_job_put(struct bsg_job *job)
 {
-	kref_put(&job->kref, bsg_destroy_job);
+	kref_put(&job->kref, bsg_teardown_job);
 }
 EXPORT_SYMBOL_GPL(bsg_job_put);

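The reordering above only makes sense against the kref lifecycle bsg-lib uses: the release callback runs exactly once, when the last reference is dropped, and with the job now embedded in the request's payload it must free only what the job itself owns before completing the request. A minimal userspace sketch of that put/release pattern, with invented names:

    /*
     * Editorial sketch, not kernel code: the kref_put()/release shape
     * behind bsg_job_put() and bsg_teardown_job().
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct job {
        int refcount;   /* stand-in for struct kref */
        char *payload;  /* stand-in for the scatter/gather lists */
    };

    static void job_release(struct job *job)
    {
        free(job->payload);     /* free job-owned resources first ... */
        printf("request completed\n");  /* ... then hand back the request */
    }

    static void job_put(struct job *job)
    {
        if (--job->refcount == 0)  /* kref_put(&job->kref, job_release) */
            job_release(job);
    }

    int main(void)
    {
        struct job job = { .refcount = 2, .payload = malloc(16) };

        job_put(&job);  /* still referenced elsewhere: nothing happens */
        job_put(&job);  /* last reference: release runs exactly once */
        return 0;
    }
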
@@ -100,7 +99,7 @@ EXPORT_SYMBOL_GPL(bsg_job_done);
  */
 static void bsg_softirq_done(struct request *rq)
 {
-	struct bsg_job *job = rq->special;
+	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
 
 	bsg_job_put(job);
 }
@@ -122,33 +121,20 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
 }
 
 /**
- * bsg_create_job - create the bsg_job structure for the bsg request
+ * bsg_prepare_job - create the bsg_job structure for the bsg request
  * @dev: device that is being sent the bsg request
  * @req: BSG request that needs a job structure
  */
-static int bsg_create_job(struct device *dev, struct request *req)
+static int bsg_prepare_job(struct device *dev, struct request *req)
 {
 	struct request *rsp = req->next_rq;
-	struct request_queue *q = req->q;
 	struct scsi_request *rq = scsi_req(req);
-	struct bsg_job *job;
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
 	int ret;
 
-	BUG_ON(req->special);
-
-	job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
-	if (!job)
-		return -ENOMEM;
-
-	req->special = job;
-	job->req = req;
-	if (q->bsg_job_size)
-		job->dd_data = (void *)&job[1];
 	job->request = rq->cmd;
 	job->request_len = rq->cmd_len;
-	job->reply = rq->sense;
-	job->reply_len = SCSI_SENSE_BUFFERSIZE;	/* Size of sense buffer
-						 * allocated */
 
 	if (req->bio) {
 		ret = bsg_map_buffer(&job->request_payload, req);
 		if (ret)
@@ -187,7 +173,6 @@ static void bsg_request_fn(struct request_queue *q)
 {
 	struct device *dev = q->queuedata;
 	struct request *req;
-	struct bsg_job *job;
 	int ret;
 
 	if (!get_device(dev))
@@ -199,7 +184,7 @@ static void bsg_request_fn(struct request_queue *q)
 			break;
 		spin_unlock_irq(q->queue_lock);
 
-		ret = bsg_create_job(dev, req);
+		ret = bsg_prepare_job(dev, req);
 		if (ret) {
 			scsi_req(req)->result = ret;
 			blk_end_request_all(req, BLK_STS_OK);
@@ -207,8 +192,7 @@ static void bsg_request_fn(struct request_queue *q)
 			continue;
 		}
 
-		job = req->special;
-		ret = q->bsg_job_fn(job);
+		ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
 		spin_lock_irq(q->queue_lock);
 		if (ret)
 			break;
@@ -219,6 +203,35 @@ static void bsg_request_fn(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 }
 
+static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
+	struct scsi_request *sreq = &job->sreq;
+
+	memset(job, 0, sizeof(*job));
+
+	scsi_req_init(sreq);
+	sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
+	sreq->sense = kzalloc(sreq->sense_len, gfp);
+	if (!sreq->sense)
+		return -ENOMEM;
+
+	job->req = req;
+	job->reply = sreq->sense;
+	job->reply_len = sreq->sense_len;
+	job->dd_data = job + 1;
+
+	return 0;
+}
+
+static void bsg_exit_rq(struct request_queue *q, struct request *req)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
+	struct scsi_request *sreq = &job->sreq;
+
+	kfree(sreq->sense);
+}
+
 /**
  * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
  * @dev: device to attach bsg device to
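bsg_init_rq() above leans on the block layer's per-request payload: the queue's cmd_size extra bytes are allocated directly behind each request, and blk_mq_rq_to_pdu() is plain pointer arithmetic. A hedged sketch of that layout, with stand-in types instead of the real block-layer structures:

    /*
     * Editorial sketch, not kernel code: the "PDU after the request"
     * layout that bsg_setup_queue() sizes via q->cmd_size below.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct request { int tag; /* ... */ };
    struct bsg_job { void *dd_data; int reply_len; };

    static void *rq_to_pdu(struct request *rq)
    {
        return rq + 1;  /* the PDU sits immediately after the request */
    }

    int main(void)
    {
        size_t dd_job_size = 64;  /* LLD-private bytes, as in dd_job_size */
        struct request *rq =
            calloc(1, sizeof(*rq) + sizeof(struct bsg_job) + dd_job_size);

        struct bsg_job *job = rq_to_pdu(rq);
        job->dd_data = job + 1;  /* driver data follows the job itself */

        printf("rq=%p job=%p dd_data=%p\n",
               (void *)rq, (void *)job, job->dd_data);
        free(rq);
        return 0;
    }

job + 1 points just past the bsg_job, which is where the dd_job_size bytes of driver-private data begin; that matches the q->cmd_size computation in the next hunk.
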
@@ -235,7 +248,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 	q = blk_alloc_queue(GFP_KERNEL);
 	if (!q)
 		return ERR_PTR(-ENOMEM);
-	q->cmd_size = sizeof(struct scsi_request);
+	q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
+	q->init_rq_fn = bsg_init_rq;
+	q->exit_rq_fn = bsg_exit_rq;
 	q->request_fn = bsg_request_fn;
 
 	ret = blk_init_allocated_queue(q);
@@ -243,7 +258,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 		goto out_cleanup_queue;
 
 	q->queuedata = dev;
-	q->bsg_job_size = dd_job_size;
 	q->bsg_job_fn = job_fn;
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);

@@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
 		free_buffer_on_error = TRUE;
 	}
 
-	status = acpi_get_handle(handle, pathname, &target_handle);
-	if (ACPI_FAILURE(status)) {
-		return_ACPI_STATUS(status);
+	if (pathname) {
+		status = acpi_get_handle(handle, pathname, &target_handle);
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	} else {
+		target_handle = handle;
 	}
 
 	full_pathname = acpi_ns_get_external_pathname(target_handle);

@@ -1741,7 +1741,7 @@ int __init acpi_ec_dsdt_probe(void)
  * functioning ECDT EC first in order to handle the events.
  * https://bugzilla.kernel.org/show_bug.cgi?id=115021
  */
-int __init acpi_ec_ecdt_start(void)
+static int __init acpi_ec_ecdt_start(void)
 {
 	acpi_handle handle;
 
@@ -2003,20 +2003,17 @@ static inline void acpi_ec_query_exit(void)
 int __init acpi_ec_init(void)
 {
 	int result;
+	int ecdt_fail, dsdt_fail;
 
 	/* register workqueue for _Qxx evaluations */
 	result = acpi_ec_query_init();
 	if (result)
-		goto err_exit;
-	/* Now register the driver for the EC */
-	result = acpi_bus_register_driver(&acpi_ec_driver);
-	if (result)
-		goto err_exit;
+		return result;
 
-err_exit:
-	if (result)
-		acpi_ec_query_exit();
-	return result;
+	/* Drivers must be started after acpi_ec_query_init() */
+	ecdt_fail = acpi_ec_ecdt_start();
+	dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+	return ecdt_fail && dsdt_fail ? -ENODEV : 0;
 }
 
 /* EC driver currently not unloadable */

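The return expression adopted above treats the ECDT and DSDT startup paths as alternatives: only the case where both failed counts as an error. A tiny sketch of that truth table (error value assumed to be -ENODEV, i.e. -19):

    /* Editorial sketch, not kernel code: the acpi_ec_init() success rule. */
    #include <stdio.h>

    #define ENODEV 19

    static int ec_init_result(int ecdt_fail, int dsdt_fail)
    {
        return ecdt_fail && dsdt_fail ? -ENODEV : 0;
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               ec_init_result(0, 0), ec_init_result(0, 1),
               ec_init_result(1, 0), ec_init_result(1, 1));
        return 0;  /* prints: 0 0 0 -19 */
    }
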
@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
 int acpi_ec_init(void);
 int acpi_ec_ecdt_probe(void);
 int acpi_ec_dsdt_probe(void);
-int acpi_ec_ecdt_start(void);
 void acpi_ec_block_transactions(void);
 void acpi_ec_unblock_transactions(void);
 int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,

@@ -1047,7 +1047,7 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
 	fwnode_for_each_child_node(fwnode, child) {
 		u32 nr;
 
-		if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
+		if (fwnode_property_read_u32(child, prop_name, &nr))
 			continue;
 
 		if (val == nr)

@@ -2084,7 +2084,6 @@ int __init acpi_scan_init(void)
 
 	acpi_gpe_apply_masked_gpes();
 	acpi_update_all_gpes();
-	acpi_ec_ecdt_start();
 
 	acpi_scan_initialized = true;
 

@@ -3362,7 +3362,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	const char *failure_string;
 	struct binder_buffer *buffer;
 
-	if (proc->tsk != current)
+	if (proc->tsk != current->group_leader)
 		return -EINVAL;
 
 	if ((vma->vm_end - vma->vm_start) > SZ_4M)

@@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 }
 
 static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
-		 loff_t logical_blocksize)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
 {
 	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
 	sector_t x = (sector_t)size;

@@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
 	lo->lo_offset = offset;
 	if (lo->lo_sizelimit != sizelimit)
 		lo->lo_sizelimit = sizelimit;
-	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
-		lo->lo_logical_blocksize = logical_blocksize;
-		blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
-		blk_queue_logical_block_size(lo->lo_queue,
-					     lo->lo_logical_blocksize);
-	}
 	set_capacity(lo->lo_disk, x);
 	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
 	/* let user-space know about the new size */

@@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo)
 	struct file *file = lo->lo_backing_file;
 	struct inode *inode = file->f_mapping->host;
 	struct request_queue *q = lo->lo_queue;
-	int lo_bits = 9;
 
 	/*
 	 * We use punch hole to reclaim the free space used by the

@@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo)
 
 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
 	q->limits.discard_alignment = 0;
-	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
-		lo_bits = blksize_bits(lo->lo_logical_blocksize);
 
-	blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
-	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
+	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
 

@@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
 	lo->use_dio = false;
 	lo->lo_blocksize = lo_blocksize;
-	lo->lo_logical_blocksize = 512;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
 	lo->lo_backing_file = file;

@@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	int err;
 	struct loop_func_table *xfer;
 	kuid_t uid = current_uid();
-	int lo_flags = lo->lo_flags;
 
 	if (lo->lo_encrypt_key_size &&
 	    !uid_eq(lo->lo_key_owner, uid) &&

@@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	if (err)
 		goto exit;
 
-	if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
-		if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
-			lo->lo_logical_blocksize = 512;
-		lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
-		if (LO_INFO_BLOCKSIZE(info) != 512 &&
-		    LO_INFO_BLOCKSIZE(info) != 1024 &&
-		    LO_INFO_BLOCKSIZE(info) != 2048 &&
-		    LO_INFO_BLOCKSIZE(info) != 4096)
-			return -EINVAL;
-		if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
-			return -EINVAL;
-	}
-
 	if (lo->lo_offset != info->lo_offset ||
-	    lo->lo_sizelimit != info->lo_sizelimit ||
-	    lo->lo_flags != lo_flags ||
-	    ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
-	     lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
-		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
-				     LO_INFO_BLOCKSIZE(info))) {
+	    lo->lo_sizelimit != info->lo_sizelimit) {
+		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
 			err = -EFBIG;
 			goto exit;
 		}

@@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo)
 	if (unlikely(lo->lo_state != Lo_bound))
 		return -ENXIO;
 
-	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
-				lo->lo_logical_blocksize);
+	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
 }
 
 static int loop_set_dio(struct loop_device *lo, unsigned long arg)

@@ -49,7 +49,6 @@ struct loop_device {
 	struct file *	lo_backing_file;
 	struct block_device *lo_device;
 	unsigned	lo_blocksize;
-	unsigned	lo_logical_blocksize;
 	void		*key_data;
 
 	gfp_t		old_gfp_mask;

@@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
 	struct request_queue *q = vblk->disk->queue;
 	char cap_str_2[10], cap_str_10[10];
 	char *envp[] = { "RESIZE=1", NULL };
+	unsigned long long nblocks;
 	u64 capacity;
 
 	/* Host must always specify the capacity. */

@@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work)
 		capacity = (sector_t)-1;
 	}
 
-	string_get_size(capacity, queue_logical_block_size(q),
+	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
+
+	string_get_size(nblocks, queue_logical_block_size(q),
 			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
-	string_get_size(capacity, queue_logical_block_size(q),
+	string_get_size(nblocks, queue_logical_block_size(q),
 			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
 
 	dev_notice(&vdev->dev,
-		   "new size: %llu %d-byte logical blocks (%s/%s)\n",
-		   (unsigned long long)capacity,
-		   queue_logical_block_size(q),
-		   cap_str_10, cap_str_2);
+		   "new size: %llu %d-byte logical blocks (%s/%s)\n",
+		   nblocks,
+		   queue_logical_block_size(q),
+		   cap_str_10,
+		   cap_str_2);
 
 	set_capacity(vblk->disk, capacity);
 	revalidate_disk(vblk->disk);

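The nblocks computation above fixes a unit mismatch: virtio reports capacity in 512-byte sectors, while the log message speaks in logical blocks, so the sector count has to be divided by (block size / 512), rounding up. A short standalone check of the arithmetic, with an assumed 4096-byte block size:

    /* Editorial sketch, not kernel code: the sector-to-block conversion. */
    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        uint64_t capacity = 20971520;  /* 512-byte sectors: 10 GiB */
        unsigned int lbs = 4096;       /* logical block size, assumed */
        uint64_t nblocks = DIV_ROUND_UP_ULL(capacity, lbs >> 9);

        printf("new size: %llu %u-byte logical blocks\n",
               (unsigned long long)nblocks, lbs);  /* 2621440 blocks */
        return 0;
    }
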
@@ -717,8 +717,8 @@ static int tegra_adma_probe(struct platform_device *pdev)
 		tdc->chan_addr = tdma->base_addr + ADMA_CH_REG_OFFSET(i);
 
 		tdc->irq = of_irq_get(pdev->dev.of_node, i);
-		if (tdc->irq < 0) {
-			ret = tdc->irq;
+		if (tdc->irq <= 0) {
+			ret = tdc->irq ?: -ENXIO;
 			goto irq_dispose;
 		}
 

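The ret = tdc->irq ?: -ENXIO line above uses the GNU C conditional with an omitted middle operand: x ?: y evaluates to x when x is nonzero, otherwise y. Here it maps "no IRQ mapped" (0) to -ENXIO while letting real negative error codes pass through, as this sketch illustrates (compiles with GCC or Clang):

    /* Editorial sketch, not kernel code: the "elvis" operator idiom. */
    #include <stdio.h>

    static int pick_err(int irq)
    {
        return irq ?: -6;  /* -ENXIO; used only when irq <= 0 */
    }

    int main(void)
    {
        printf("%d %d\n", pick_err(0), pick_err(-22));  /* -6 -22 */
        return 0;
    }
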
@@ -557,7 +557,7 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
 	edge_cause = mvebu_gpio_read_edge_cause(mvchip);
 	edge_mask  = mvebu_gpio_read_edge_mask(mvchip);
 
-	cause = (data_in ^ level_mask) | (edge_cause & edge_mask);
+	cause = (data_in & level_mask) | (edge_cause & edge_mask);
 
 	for (i = 0; i < mvchip->chip.ngpio; i++) {
 		int irq;

@@ -2,6 +2,7 @@
 #include <linux/mutex.h>
 #include <linux/device.h>
 #include <linux/sysfs.h>
+#include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
 #include <linux/gpio/driver.h>
 #include <linux/interrupt.h>
@@ -432,6 +433,11 @@ static struct attribute *gpiochip_attrs[] = {
 };
 ATTRIBUTE_GROUPS(gpiochip);
 
+static struct gpio_desc *gpio_to_valid_desc(int gpio)
+{
+	return gpio_is_valid(gpio) ? gpio_to_desc(gpio) : NULL;
+}
+
 /*
  * /sys/class/gpio/export ... write-only
  *	integer N ... number of GPIO to export (full access)
@@ -450,7 +456,7 @@ static ssize_t export_store(struct class *class,
 	if (status < 0)
 		goto done;
 
-	desc = gpio_to_desc(gpio);
+	desc = gpio_to_valid_desc(gpio);
 	/* reject invalid GPIOs */
 	if (!desc) {
 		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);
@@ -493,7 +499,7 @@ static ssize_t unexport_store(struct class *class,
 	if (status < 0)
 		goto done;
 
-	desc = gpio_to_desc(gpio);
+	desc = gpio_to_valid_desc(gpio);
 	/* reject bogus commands (gpio_unexport ignores them) */
 	if (!desc) {
 		pr_warn("%s: invalid GPIO %ld\n", __func__, gpio);

@@ -1655,6 +1655,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 	if (config->funcs->atomic_check)
 		ret = config->funcs->atomic_check(state->dev, state);
 
+	if (ret)
+		return ret;
+
 	if (!state->allow_modeset) {
 		for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
 			if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -1665,7 +1668,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
 		}
 	}
 
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL(drm_atomic_check_only);

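The early return added above matters because the loop that follows can fail with its own error code. A compact model of the two control flows (error numbers assumed) shows how the old code could let the loop's -EINVAL mask a driver's -EDEADLK, which callers need to see in order to back off and retry:

    /* Editorial sketch, not kernel code: error masking vs. propagation. */
    #include <stdio.h>

    #define EDEADLK 35
    #define EINVAL  22

    static int check(int driver_ret, int needs_modeset, int early_return)
    {
        int ret = driver_ret;

        if (early_return && ret)
            return ret;       /* new flow: propagate immediately */
        if (needs_modeset)
            return -EINVAL;   /* old flow could overwrite the error */
        return ret;
    }

    int main(void)
    {
        /* driver reported -EDEADLK, and a CRTC also needs a modeset */
        printf("old: %d  new: %d\n",
               check(-EDEADLK, 1, 0), check(-EDEADLK, 1, 1));
        return 0;  /* old: -22 (masked)  new: -35 */
    }
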
@@ -2167,10 +2170,10 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	struct drm_atomic_state *state;
 	struct drm_modeset_acquire_ctx ctx;
 	struct drm_plane *plane;
-	struct drm_out_fence_state *fence_state = NULL;
+	struct drm_out_fence_state *fence_state;
 	unsigned plane_mask;
 	int ret = 0;
-	unsigned int i, j, num_fences = 0;
+	unsigned int i, j, num_fences;
 
 	/* disallow for drivers not supporting atomic: */
 	if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
@@ -2211,6 +2214,8 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
 	plane_mask = 0;
 	copied_objs = 0;
 	copied_props = 0;
+	fence_state = NULL;
+	num_fences = 0;
 
 	for (i = 0; i < arg->count_objs; i++) {
 		uint32_t obj_id, count_props;

@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 	struct drm_gem_object *obj = ptr;
 	struct drm_device *dev = obj->dev;
 
-	if (dev->driver->gem_close_object)
-		dev->driver->gem_close_object(obj, file_priv);
-
 	if (drm_core_check_feature(dev, DRIVER_PRIME))
 		drm_gem_remove_prime_handles(obj, file_priv);
 	drm_vma_node_revoke(&obj->vma_node, file_priv);
 
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, file_priv);
+
 	drm_gem_object_handle_put_unlocked(obj);
 
 	return 0;

@@ -601,6 +601,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
 
 		crtc = drm_crtc_find(dev, plane_req->crtc_id);
 		if (!crtc) {
+			drm_framebuffer_put(fb);
 			DRM_DEBUG_KMS("Unknown crtc ID %d\n",
 				      plane_req->crtc_id);
 			return -ENOENT;

@@ -2714,7 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 unmap_src:
 	i915_gem_object_unpin_map(obj);
 put_obj:
-	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+	i915_gem_object_put(obj);
 	return ret;
 }
 

@@ -1120,8 +1120,8 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 	bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
 	uint8_t aux_channel, ddc_pin;
 	/* Each DDI port can have more than one value on the "DVO Port" field,
-	 * so look for all the possible values for each port and abort if more
-	 * than one is found. */
+	 * so look for all the possible values for each port.
+	 */
 	int dvo_ports[][3] = {
 		{DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
 		{DVO_PORT_HDMIB, DVO_PORT_DPB, -1},

@@ -1130,7 +1130,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 		{DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
 	};
 
-	/* Find the child device to use, abort if more than one found. */
+	/*
+	 * Find the first child device to reference the port, report if more
+	 * than one found.
+	 */
 	for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
 		it = dev_priv->vbt.child_dev + i;
 

@@ -1140,11 +1143,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
 
 			if (it->common.dvo_port == dvo_ports[port][j]) {
 				if (child) {
-					DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
+					DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
 						      port_name(port));
-					return;
-				} else {
-					child = it;
 				}
+
+				child = it;
 			}
 		}
 	}

@@ -46,7 +46,7 @@ static u32 dcs_get_backlight(struct intel_connector *connector)
 	struct intel_encoder *encoder = connector->encoder;
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
 	struct mipi_dsi_device *dsi_device;
-	u8 data;
+	u8 data = 0;
 	enum port port;
 
 	/* FIXME: Need to take care of 16 bit brightness level */

@@ -306,7 +306,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
 
 	if (!gpio_desc) {
 		gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
-						 "panel", gpio_index,
+						 NULL, gpio_index,
 						 value ? GPIOD_OUT_LOW :
 						 GPIOD_OUT_HIGH);
 

@@ -1221,6 +1221,14 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
 	return ret;
 }
 
+static u8 gtiir[] = {
+	[RCS] = 0,
+	[BCS] = 0,
+	[VCS] = 1,
+	[VCS2] = 1,
+	[VECS] = 3,
+};
+
 static int gen8_init_common_ring(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;

@@ -1245,9 +1253,22 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 
 	DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
 
-	/* After a GPU reset, we may have requests to replay */
+	GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
+
+	/*
+	 * Clear any pending interrupt state.
+	 *
+	 * We do it twice out of paranoia that some of the IIR are double
+	 * buffered, and if we only reset it once there may still be
+	 * an interrupt pending.
+	 */
+	I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+		   GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
+	I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
+		   GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
+	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+
+	/* After a GPU reset, we may have requests to replay */
 	submit = false;
 	for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++) {
 		if (!port_isset(&port[n]))

@@ -210,8 +210,8 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
 	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	if (!IS_GEN9(dev_priv)) {
-		DRM_ERROR("LSPCON is supported on GEN9 only\n");
+	if (!HAS_LSPCON(dev_priv)) {
+		DRM_ERROR("LSPCON is not supported on this platform\n");
 		return false;
 	}
 

@@ -545,15 +545,13 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 		return;
 	}
 
+	ics = ipu_drm_fourcc_to_colorspace(fb->format->format);
 	switch (ipu_plane->dp_flow) {
 	case IPU_DP_FLOW_SYNC_BG:
-		ipu_dp_setup_channel(ipu_plane->dp,
-					IPUV3_COLORSPACE_RGB,
-					IPUV3_COLORSPACE_RGB);
+		ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_RGB);
 		ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
 		break;
 	case IPU_DP_FLOW_SYNC_FG:
-		ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
 		ipu_dp_setup_channel(ipu_plane->dp, ics,
 					IPUV3_COLORSPACE_UNKNOWN);
 		/* Enable local alpha on partial plane */

@@ -275,11 +275,15 @@ static void rockchip_drm_fb_resume(struct drm_device *drm)
 static int rockchip_drm_sys_suspend(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct rockchip_drm_private *priv = drm->dev_private;
+	struct rockchip_drm_private *priv;
+
+	if (!drm)
+		return 0;
 
 	drm_kms_helper_poll_disable(drm);
 	rockchip_drm_fb_suspend(drm);
 
+	priv = drm->dev_private;
 	priv->state = drm_atomic_helper_suspend(drm);
 	if (IS_ERR(priv->state)) {
 		rockchip_drm_fb_resume(drm);

@@ -293,8 +297,12 @@ static int rockchip_drm_sys_suspend(struct device *dev)
 static int rockchip_drm_sys_resume(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct rockchip_drm_private *priv = drm->dev_private;
+	struct rockchip_drm_private *priv;
 
+	if (!drm)
+		return 0;
+
+	priv = drm->dev_private;
 	drm_atomic_helper_resume(drm, priv->state);
 	rockchip_drm_fb_resume(drm);
 	drm_kms_helper_poll_enable(drm);

@@ -25,12 +25,20 @@
 #include "sun4i_framebuffer.h"
 #include "sun4i_tcon.h"
 
+static void sun4i_drv_lastclose(struct drm_device *dev)
+{
+	struct sun4i_drv *drv = dev->dev_private;
+
+	drm_fbdev_cma_restore_mode(drv->fbdev);
+}
+
 DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
 
 static struct drm_driver sun4i_drv_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
 
 	/* Generic Operations */
+	.lastclose = sun4i_drv_lastclose,
 	.fops = &sun4i_drv_fops,
 	.name = "sun4i-drm",
 	.desc = "Allwinner sun4i Display Engine",

@@ -1,6 +1,7 @@
 config IMX_IPUV3_CORE
 	tristate "IPUv3 core support"
 	depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
+	depends on DRM || !DRM # if DRM=m, this can't be 'y'
 	select GENERIC_IRQ_CHIP
 	help
 	  Choose this if you have a i.MX5/6 system and want to use the Image

@@ -410,10 +410,11 @@ static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus)
 	}
 
 	/* We are in an invalid state; reset bus to a known state. */
-	if (!bus->msgs && bus->master_state != ASPEED_I2C_MASTER_STOP) {
+	if (!bus->msgs) {
 		dev_err(bus->dev, "bus in unknown state");
 		bus->cmd_err = -EIO;
-		aspeed_i2c_do_stop(bus);
+		if (bus->master_state != ASPEED_I2C_MASTER_STOP)
+			aspeed_i2c_do_stop(bus);
 		goto out_no_complete;
 	}
 	msg = &bus->msgs[bus->msgs_index];

@@ -198,8 +198,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
 	dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY;
 
 	dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL |
-			 DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED |
-			 DW_IC_CON_SPEED_FAST;
+			 DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED;
 
 	dev->mode = DW_IC_SLAVE;
 

@@ -430,7 +429,7 @@ static void dw_i2c_plat_complete(struct device *dev)
 #endif
 
 #ifdef CONFIG_PM
-static int dw_i2c_plat_suspend(struct device *dev)
+static int dw_i2c_plat_runtime_suspend(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);

@@ -452,11 +451,21 @@ static int dw_i2c_plat_resume(struct device *dev)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int dw_i2c_plat_suspend(struct device *dev)
+{
+	pm_runtime_resume(dev);
+	return dw_i2c_plat_runtime_suspend(dev);
+}
+#endif
+
 static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
 	.prepare = dw_i2c_plat_prepare,
 	.complete = dw_i2c_plat_complete,
 	SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
-	SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
+	SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
+			   dw_i2c_plat_resume,
+			   NULL)
 };
 
 #define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)

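Splitting dw_i2c_plat_suspend() from dw_i2c_plat_runtime_suspend() above follows a common PM idiom: the system-sleep hook first runtime-resumes the device so that it then suspends exactly once, from a known-active state, instead of re-suspending hardware that is already runtime-suspended. A toy model of that ordering, with invented state tracking:

    /* Editorial sketch, not kernel code: resume-before-system-suspend. */
    #include <stdio.h>

    enum state { ACTIVE, SUSPENDED };
    static enum state dev_state = ACTIVE;

    static int runtime_suspend(void)
    {
        if (dev_state == SUSPENDED)
            return -1;  /* double suspend: clocks disabled twice */
        dev_state = SUSPENDED;
        printf("clocks off\n");
        return 0;
    }

    static void runtime_resume(void)
    {
        if (dev_state == SUSPENDED) {
            dev_state = ACTIVE;
            printf("clocks on\n");
        }
    }

    /* system-sleep hook: wake first, then do one clean suspend */
    static int system_suspend(void)
    {
        runtime_resume();
        return runtime_suspend();
    }

    int main(void)
    {
        runtime_suspend();        /* device idled before system sleep */
        return system_suspend();  /* still succeeds, suspends once */
    }
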
@@ -177,6 +177,8 @@ static int i2c_dw_reg_slave(struct i2c_client *slave)
 		return -EBUSY;
 	if (slave->flags & I2C_CLIENT_TEN)
 		return -EAFNOSUPPORT;
+	pm_runtime_get_sync(dev->dev);
+
 	/*
 	 * Set slave address in the IC_SAR register,
 	 * the address to which the DW_apb_i2c responds.

@@ -205,6 +207,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
 	dev->disable_int(dev);
 	dev->disable(dev);
 	dev->slave = NULL;
+	pm_runtime_put(dev->dev);
 
 	return 0;
 }

@@ -272,7 +275,7 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev)
 	slave_activity = ((dw_readl(dev, DW_IC_STATUS) &
 			DW_IC_STATUS_SLAVE_ACTIVITY) >> 6);
 
-	if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY))
+	if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave)
 		return 0;
 
 	dev_dbg(dev->dev,

@@ -382,7 +385,6 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
 	ret = i2c_add_numbered_adapter(adap);
 	if (ret)
 		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
-	pm_runtime_put_noidle(dev->dev);
 
 	return ret;
 }

@@ -127,8 +127,7 @@ static int simtec_i2c_probe(struct platform_device *dev)
 	iounmap(pd->reg);
 
  err_res:
-	release_resource(pd->ioarea);
-	kfree(pd->ioarea);
+	release_mem_region(pd->ioarea->start, size);
 
  err:
 	kfree(pd);

@@ -142,8 +141,7 @@ static int simtec_i2c_remove(struct platform_device *dev)
 	i2c_del_adapter(&pd->adap);
 
 	iounmap(pd->reg);
-	release_resource(pd->ioarea);
-	kfree(pd->ioarea);
+	release_mem_region(pd->ioarea->start, resource_size(pd->ioarea));
 	kfree(pd);
 
 	return 0;

@@ -353,8 +353,8 @@ static int i2c_device_probe(struct device *dev)
 	}
 
 	/*
-	 * An I2C ID table is not mandatory, if and only if, a suitable Device
-	 * Tree match table entry is supplied for the probing device.
+	 * An I2C ID table is not mandatory, if and only if, a suitable OF
+	 * or ACPI ID table is supplied for the probing device.
 	 */
 	if (!driver->id_table &&
 	    !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&

@@ -644,7 +644,7 @@ static int ina2xx_capture_thread(void *data)
 {
 	struct iio_dev *indio_dev = data;
 	struct ina2xx_chip_info *chip = iio_priv(indio_dev);
-	unsigned int sampling_us = SAMPLING_PERIOD(chip);
+	int sampling_us = SAMPLING_PERIOD(chip);
 	int buffer_us;
 
 	/*

@@ -64,7 +64,7 @@
 #define STM32H7_CKMODE_MASK		GENMASK(17, 16)
 
 /* STM32 H7 maximum analog clock rate (from datasheet) */
-#define STM32H7_ADC_MAX_CLK_RATE	72000000
+#define STM32H7_ADC_MAX_CLK_RATE	36000000
 
 /**
  * stm32_adc_common_regs - stm32 common registers, compatible dependent data

@@ -148,14 +148,14 @@ static int stm32f4_adc_clk_sel(struct platform_device *pdev,
 		return -EINVAL;
 	}
 
-	priv->common.rate = rate;
+	priv->common.rate = rate / stm32f4_pclk_div[i];
 	val = readl_relaxed(priv->common.base + STM32F4_ADC_CCR);
 	val &= ~STM32F4_ADC_ADCPRE_MASK;
 	val |= i << STM32F4_ADC_ADCPRE_SHIFT;
 	writel_relaxed(val, priv->common.base + STM32F4_ADC_CCR);
 
 	dev_dbg(&pdev->dev, "Using analog clock source at %ld kHz\n",
-		rate / (stm32f4_pclk_div[i] * 1000));
+		priv->common.rate / 1000);
 
 	return 0;
 }

@@ -250,7 +250,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
 
 out:
 	/* rate used later by each ADC instance to control BOOST mode */
-	priv->common.rate = rate;
+	priv->common.rate = rate / div;
 
 	/* Set common clock mode and prescaler */
 	val = readl_relaxed(priv->common.base + STM32H7_ADC_CCR);

@@ -260,7 +260,7 @@ static int stm32h7_adc_clk_sel(struct platform_device *pdev,
 	writel_relaxed(val, priv->common.base + STM32H7_ADC_CCR);
 
 	dev_dbg(&pdev->dev, "Using %s clock/%d source at %ld kHz\n",
-		ckmode ? "bus" : "adc", div, rate / (div * 1000));
+		ckmode ? "bus" : "adc", div, priv->common.rate / 1000);
 
 	return 0;
 }

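The stm32-adc hunks above all correct the same unit bug: priv->common.rate must cache the post-prescaler analog clock, because later consumers (such as the H7 BOOST-mode decision against STM32H7_ADC_MAX_CLK_RATE) compare against that value. A quick arithmetic check with assumed input values:

    /* Editorial sketch, not kernel code: caching the divided clock rate. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long rate = 144000000;  /* bus clock, assumed */
        unsigned int div = 4;            /* chosen prescaler */

        unsigned long adc_rate = rate / div;  /* what must be cached */
        printf("Using clock/%u source at %lu kHz\n", div, adc_rate / 1000);
        return 0;  /* 36000 kHz, within the 36 MHz datasheet limit */
    }
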
@@ -111,8 +111,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 	s32 poll_value = 0;
 
 	if (state) {
-		if (!atomic_read(&st->user_requested_state))
-			return 0;
 		if (sensor_hub_device_open(st->hsdev))
 			return -EIO;
 

@@ -161,6 +159,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 					       &report_val);
 	}
 
+	pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
+		 st->pdev->name, state_val, report_val);
+
 	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
 			       st->power_state.index,
 			       sizeof(state_val), &state_val);

@@ -182,6 +183,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
 		ret = pm_runtime_get_sync(&st->pdev->dev);
 	else {
 		pm_runtime_mark_last_busy(&st->pdev->dev);
+		pm_runtime_use_autosuspend(&st->pdev->dev);
 		ret = pm_runtime_put_autosuspend(&st->pdev->dev);
 	}
 	if (ret < 0) {

@@ -285,8 +287,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
 		/* Default to 3 seconds, but can be changed from sysfs */
 		pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
 						 3000);
-		pm_runtime_use_autosuspend(&attrb->pdev->dev);
 
 	return ret;
 error_unreg_trigger:
 	iio_trigger_unregister(trig);

@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
 		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
 		.gyro_max_scale = 450,
 		.accel_max_val = IIO_M_S_2_TO_G(12500),
-		.accel_max_scale = 5,
+		.accel_max_scale = 10,
 	},
 	[ADIS16485] = {
 		.channels = adis16485_channels,

Some files were not shown because too many files have changed in this diff.