Linux 4.17-rc2

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAlrdQu4eHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGVjEIAJqS+sFJCAL8rNAv
 tiVJHuAjogVdZGJJFBUWyb4yNZw7nRSKfitaSe875WdF55IGEhnMDbAGe7IMEb5j
 1F8Ml2bzJzMWxfBWAzeU+wj6FaQksbIsI1gVM8tqk/Wtu121pB32VW8R82oHg+Hr
 sjsFTKFicNsqih+7QTVujaRjSmabKf0/JdyYM6p1cqWrxZQ0pmFaGDu0rwet9PFx
 lJsewOmnoZ0GV/Qzn40E304Xf+Vv2gVDVbC5wY86ejNigFt+5qN+gtDqDu7UkftR
 ZfD4vJuiKCigNfUrpbJWfpbegBiQc0JMvjLWWhgo/AYdGhNGMlwjQanh2oZcXlrw
 VmrNduo=
 =/j3z
 -----END PGP SIGNATURE-----

Merge tag 'v4.17-rc2' into docs-next

  Merge -rc2 to pick up the changes to
  Documentation/core-api/kernel-api.rst that hit mainline via the
  networking tree.  In their absence, subsequent patches cannot be
  applied.
Jonathan Corbet 2018-04-27 17:13:20 -06:00
commit ccf2b06794
325 changed files with 5662 additions and 2490 deletions


@ -136,6 +136,19 @@ Sorting
.. kernel-doc:: lib/list_sort.c
:export:
Text Searching
--------------
.. kernel-doc:: lib/textsearch.c
:doc: ts_intro
.. kernel-doc:: lib/textsearch.c
:export:
.. kernel-doc:: include/linux/textsearch.h
:functions: textsearch_find textsearch_next \
textsearch_get_pattern textsearch_get_pattern_len
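For orientation, the textsearch interface documented by this new section is
used roughly as follows - a minimal sketch based on the lib/textsearch.c
kernel-doc, with the haystack names chosen only for illustration:

	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "abc", 3, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return PTR_ERR(conf);

	/* returns the match offset, or UINT_MAX if "abc" does not occur */
	pos = textsearch_find_continuous(conf, &state, haystack, haystack_len);

	textsearch_destroy(conf);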
UUID/GUID
---------


@ -49,19 +49,6 @@ on the SoC (only first trip points defined in DT will be configured):
- samsung,exynos5433-tmu: 8
- samsung,exynos7-tmu: 8
Following properties are mandatory (depending on SoC):
- samsung,tmu_gain: Gain value for internal TMU operation.
- samsung,tmu_reference_voltage: Value of TMU IP block's reference voltage
- samsung,tmu_noise_cancel_mode: Mode for noise cancellation
- samsung,tmu_efuse_value: Default temperature level - needed when the
factory fusing produced a wrong value
- samsung,tmu_min_efuse_value: Minimum temperature fused value
- samsung,tmu_max_efuse_value: Maximum temperature fused value
- samsung,tmu_first_point_trim: First point trimming value
- samsung,tmu_second_point_trim: Second point trimming value
- samsung,tmu_default_temp_offset: Default temperature offset
- samsung,tmu_cal_type: Calibration type
** Optional properties:
- vtmu-supply: This entry is optional and provides the regulator node supplying
@ -78,7 +65,7 @@ Example 1):
clocks = <&clock 383>;
clock-names = "tmu_apbif";
vtmu-supply = <&tmu_regulator_node>;
#include "exynos4412-tmu-sensor-conf.dtsi"
#thermal-sensor-cells = <0>;
};
Example 2):
@ -89,7 +76,7 @@ Example 2):
interrupts = <0 58 0>;
clocks = <&clock 21>;
clock-names = "tmu_apbif";
#include "exynos5440-tmu-sensor-conf.dtsi"
#thermal-sensor-cells = <0>;
};
Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
@ -99,7 +86,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
interrupts = <0 184 0>;
clocks = <&clock 318>, <&clock 318>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
#include "exynos4412-tmu-sensor-conf.dtsi"
#thermal-sensor-cells = <0>;
};
tmu_cpu3: tmu@1006c000 {
@ -108,7 +95,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
interrupts = <0 185 0>;
clocks = <&clock 318>, <&clock 319>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
#include "exynos4412-tmu-sensor-conf.dtsi"
#thermal-sensor-cells = <0>;
};
tmu_gpu: tmu@100a0000 {
@ -117,7 +104,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
interrupts = <0 215 0>;
clocks = <&clock 319>, <&clock 318>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
#include "exynos4412-tmu-sensor-conf.dtsi"
#thermal-sensor-cells = <0>;
};
Note: For multi-instance tmu each instance should have an alias correctly


@ -55,8 +55,7 @@ of heat dissipation). For example a fan's cooling states correspond to
the different fan speeds possible. Cooling states are referred to by
single unsigned integers, where larger numbers mean greater heat
dissipation. The precise set of cooling states associated with a device
(as referred to by the cooling-min-level and cooling-max-level
properties) should be defined in a particular device's binding.
should be defined in a particular device's binding.
For more examples of cooling devices, refer to the example sections below.
Required properties:
@ -69,15 +68,6 @@ Required properties:
See Cooling device maps section below for more details
on how consumers refer to cooling devices.
Optional properties:
- cooling-min-level: An integer indicating the smallest
Type: unsigned cooling state accepted. Typically 0.
Size: one cell
- cooling-max-level: An integer indicating the largest
Type: unsigned cooling state accepted.
Size: one cell
* Trip points
The trip node is a node to describe a point in the temperature domain
@ -226,8 +216,6 @@ cpus {
396000 950000
198000 850000
>;
cooling-min-level = <0>;
cooling-max-level = <3>;
#cooling-cells = <2>; /* min followed by max */
};
...
@ -241,8 +229,6 @@ cpus {
*/
fan0: fan@48 {
...
cooling-min-level = <0>;
cooling-max-level = <9>;
#cooling-cells = <2>; /* min followed by max */
};
};


@ -0,0 +1,21 @@
Nuvoton NPCM7xx timer
The Nuvoton NPCM7xx has three timer modules; each timer module provides five
24-bit timer counters.
Required properties:
- compatible : "nuvoton,npcm750-timer" for Poleg NPCM750.
- reg : Offset and length of the register set for the device.
- interrupts : Contain the timer interrupt with flags for
falling edge.
- clocks : phandle of timer reference clock (usually a 25 MHz clock).
Example:
timer@f0008000 {
compatible = "nuvoton,npcm750-timer";
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xf0008000 0x50>;
clocks = <&clk NPCM7XX_CLK_TIMER>;
};


@ -15,7 +15,7 @@ Required properties:
- interrupts : Should be the clock event device interrupt.
- clocks : The clocks provided by the SoC to drive the timer, must contain
an entry for each entry in clock-names.
- clock-names : Must include the following entries: "igp" and "per".
- clock-names : Must include the following entries: "ipg" and "per".
Example:
tpm5: tpm@40260000 {


@ -34,9 +34,13 @@ meta-data and shadow-data:
- data[] - storage for shadow data
It is important to note that the klp_shadow_alloc() and
klp_shadow_get_or_alloc() calls, described below, store a *copy* of the
data that the functions are provided. Callers should provide whatever
mutual exclusion is required of the shadow data.
klp_shadow_get_or_alloc() zero the shadow variable by default.
They also allow calling a custom constructor function when a non-zero
initial value is needed. Callers should provide whatever mutual exclusion
is required.
Note that the constructor is called under the klp_shadow_lock spinlock, which
makes it possible to perform actions that may only happen once, when a new
variable is allocated.
* klp_shadow_get() - retrieve a shadow variable data pointer
- search hashtable for <obj, id> pair
@ -47,7 +51,7 @@ mutual exclusion is required of the shadow data.
- WARN and return NULL
- if <obj, id> doesn't already exist
- allocate a new shadow variable
- copy data into the new shadow variable
- initialize the variable using a custom constructor and data when provided
- add <obj, id> to the global hashtable
* klp_shadow_get_or_alloc() - get existing or alloc a new shadow variable
@ -56,16 +60,20 @@ mutual exclusion is required of the shadow data.
- return existing shadow variable
- if <obj, id> doesn't already exist
- allocate a new shadow variable
- copy data into the new shadow variable
- initialize the variable using a custom constructor and data when provided
- add <obj, id> pair to the global hashtable
* klp_shadow_free() - detach and free a <obj, id> shadow variable
- find and remove a <obj, id> reference from global hashtable
- if found, free shadow variable
- if found
- call destructor function if defined
- free shadow variable
* klp_shadow_free_all() - detach and free all <*, id> shadow variables
- find and remove any <*, id> references from global hashtable
- if found, free shadow variable
- if found
- call destructor function if defined
- free shadow variable
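As a minimal sketch of the destructor side of this API (ps_lock_shadow_dtor is
a hypothetical name; the callback takes the same (obj, shadow_data) pair as the
constructor, and a spinlock needs no real teardown, so the body only marks the
shape):

	void ps_lock_shadow_dtor(void *obj, void *shadow_data)
	{
		/* release anything the shadow data owns; nothing to do for a spinlock */
	}

	/* instead of passing NULL as in the sta_info_free() example below: */
	klp_shadow_free(sta, PS_LOCK, ps_lock_shadow_dtor);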
2. Use cases
@ -107,7 +115,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta = kzalloc(sizeof(*sta) + hw->sta_data_size, gfp);
/* Attach a corresponding shadow variable, then initialize it */
ps_lock = klp_shadow_alloc(sta, PS_LOCK, NULL, sizeof(*ps_lock), gfp);
ps_lock = klp_shadow_alloc(sta, PS_LOCK, sizeof(*ps_lock), gfp,
NULL, NULL);
if (!ps_lock)
goto shadow_fail;
spin_lock_init(ps_lock);
@ -131,7 +140,7 @@ variable:
void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
{
klp_shadow_free(sta, PS_LOCK);
klp_shadow_free(sta, PS_LOCK, NULL);
kfree(sta);
...
@ -148,16 +157,24 @@ shadow variables to parents already in-flight.
For commit 1d147bfa6429, a good spot to allocate a shadow spinlock is
inside ieee80211_sta_ps_deliver_wakeup():
int ps_lock_shadow_ctor(void *obj, void *shadow_data, void *ctor_data)
{
spinlock_t *lock = shadow_data;
spin_lock_init(lock);
return 0;
}
#define PS_LOCK 1
void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
DEFINE_SPINLOCK(ps_lock_fallback);
spinlock_t *ps_lock;
/* sync with ieee80211_tx_h_unicast_ps_buf */
ps_lock = klp_shadow_get_or_alloc(sta, PS_LOCK,
&ps_lock_fallback, sizeof(ps_lock_fallback),
GFP_ATOMIC);
sizeof(*ps_lock), GFP_ATOMIC,
ps_lock_shadow_ctor, NULL);
if (ps_lock)
spin_lock(ps_lock);
...


@ -169,7 +169,7 @@ access to BPF code as well.
BPF engine and instruction set
------------------------------
Under tools/net/ there's a small helper tool called bpf_asm which can
Under tools/bpf/ there's a small helper tool called bpf_asm which can
be used to write low-level filters for example scenarios mentioned in the
previous section. Asm-like syntax mentioned here has been implemented in
bpf_asm and will be used for further explanations (instead of dealing with
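For instance, a small program in this syntax that accepts only ARP packets
(EtherType 0x806 loaded from offset 12 of an Ethernet frame) looks like:

	ldh [12]
	jne #0x806, drop
	ret #-1
	drop:	ret #0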
@ -359,7 +359,7 @@ $ ./bpf_asm -c foo
In particular, as usage with xt_bpf or cls_bpf can result in more complex BPF
filters that might not be obvious at first, it's good to test filters before
attaching to a live system. For that purpose, there's a small tool called
bpf_dbg under tools/net/ in the kernel source directory. This debugger allows
bpf_dbg under tools/bpf/ in the kernel source directory. This debugger allows
for testing BPF filters against given pcap files, single stepping through the
BPF code on the pcap's packets and to do BPF machine register dumps.
@ -483,7 +483,7 @@ Example output from dmesg:
[ 3389.935851] JIT code: 00000030: 00 e8 28 94 ff e0 83 f8 01 75 07 b8 ff ff 00 00
[ 3389.935852] JIT code: 00000040: eb 02 31 c0 c9 c3
In the kernel source tree under tools/net/, there's bpf_jit_disasm for
In the kernel source tree under tools/bpf/, there's bpf_jit_disasm for
generating disassembly out of the kernel log's hexdump:
# ./bpf_jit_disasm


@ -1390,26 +1390,26 @@ mld_qrv - INTEGER
Default: 2 (as specified by RFC3810 9.1)
Minimum: 1 (as specified by RFC6636 4.5)
max_dst_opts_cnt - INTEGER
max_dst_opts_number - INTEGER
Maximum number of non-padding TLVs allowed in a Destination
options extension header. If this value is less than zero
then unknown options are disallowed and the number of known
TLVs allowed is the absolute value of this number.
Default: 8
max_hbh_opts_cnt - INTEGER
max_hbh_opts_number - INTEGER
Maximum number of non-padding TLVs allowed in a Hop-by-Hop
options extension header. If this value is less than zero
then unknown options are disallowed and the number of known
TLVs allowed is the absolute value of this number.
Default: 8
max dst_opts_len - INTEGER
max_dst_opts_length - INTEGER
Maximum length allowed for a Destination options extension
header.
Default: INT_MAX (unlimited)
max hbh_opts_len - INTEGER
max_hbh_length - INTEGER
Maximum length allowed for a Hop-by-Hop options extension
header.
Default: INT_MAX (unlimited)
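Like the other entries in this section, these knobs live under
/proc/sys/net/ipv6/, so they can be inspected or adjusted at runtime, e.g.
(the value is chosen only as an illustration):

	echo 8 > /proc/sys/net/ipv6/max_dst_opts_number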


@ -1373,7 +1373,8 @@ F: arch/arm/mach-ebsa110/
F: drivers/net/ethernet/amd/am79c961a.*
ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT
M: Uwe Kleine-König <kernel@pengutronix.de>
M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
N: efm32
@ -1401,7 +1402,8 @@ F: arch/arm/mach-footbridge/
ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
M: Shawn Guo <shawnguo@kernel.org>
M: Sascha Hauer <kernel@pengutronix.de>
M: Sascha Hauer <s.hauer@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
R: Fabio Estevam <fabio.estevam@nxp.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@ -1416,7 +1418,8 @@ F: include/soc/imx/
ARM/FREESCALE VYBRID ARM ARCHITECTURE
M: Shawn Guo <shawnguo@kernel.org>
M: Sascha Hauer <kernel@pengutronix.de>
M: Sascha Hauer <s.hauer@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
R: Stefan Agner <stefan@agner.ch>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@ -4245,6 +4248,9 @@ F: include/trace/events/fs_dax.h
DEVICE DIRECT ACCESS (DAX)
M: Dan Williams <dan.j.williams@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
M: Ross Zwisler <ross.zwisler@linux.intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
L: linux-nvdimm@lists.01.org
S: Supported
F: drivers/dax/
@ -5652,7 +5658,8 @@ F: drivers/net/ethernet/freescale/fec.h
F: Documentation/devicetree/bindings/net/fsl-fec.txt
FREESCALE IMX / MXC FRAMEBUFFER DRIVER
M: Sascha Hauer <kernel@pengutronix.de>
M: Sascha Hauer <s.hauer@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
L: linux-fbdev@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@ -5784,6 +5791,14 @@ F: fs/crypto/
F: include/linux/fscrypt*.h
F: Documentation/filesystems/fscrypt.rst
FSNOTIFY: FILESYSTEM NOTIFICATION INFRASTRUCTURE
M: Jan Kara <jack@suse.cz>
R: Amir Goldstein <amir73il@gmail.com>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/notify/
F: include/linux/fsnotify*.h
FUJITSU LAPTOP EXTRAS
M: Jonathan Woithe <jwoithe@just42.net>
L: platform-driver-x86@vger.kernel.org
@ -6256,7 +6271,7 @@ S: Odd Fixes
F: drivers/media/usb/hdpvr/
HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
M: Jimmy Vance <jimmy.vance@hpe.com>
M: Jerry Hoemann <jerry.hoemann@hpe.com>
S: Supported
F: Documentation/watchdog/hpwdt.txt
F: drivers/watchdog/hpwdt.c
@ -8048,6 +8063,9 @@ F: tools/lib/lockdep/
LIBNVDIMM BLK: MMIO-APERTURE DRIVER
M: Ross Zwisler <ross.zwisler@linux.intel.com>
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
@ -8056,6 +8074,9 @@ F: drivers/nvdimm/region_devs.c
LIBNVDIMM BTT: BLOCK TRANSLATION TABLE
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dan Williams <dan.j.williams@intel.com>
M: Ross Zwisler <ross.zwisler@linux.intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
@ -8063,6 +8084,9 @@ F: drivers/nvdimm/btt*
LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER
M: Ross Zwisler <ross.zwisler@linux.intel.com>
M: Dan Williams <dan.j.williams@intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
S: Supported
@ -8078,6 +8102,9 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt
LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM
M: Dan Williams <dan.j.williams@intel.com>
M: Ross Zwisler <ross.zwisler@linux.intel.com>
M: Vishal Verma <vishal.l.verma@intel.com>
M: Dave Jiang <dave.jiang@intel.com>
L: linux-nvdimm@lists.01.org
Q: https://patchwork.kernel.org/project/linux-nvdimm/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git
@ -9765,6 +9792,7 @@ F: include/uapi/linux/net_namespace.h
F: tools/testing/selftests/net/
F: lib/net_utils.c
F: lib/random32.c
F: Documentation/networking/
NETWORKING [IPSEC]
M: Steffen Klassert <steffen.klassert@secunet.com>
@ -12816,7 +12844,8 @@ F: include/linux/siphash.h
SIOX
M: Gavin Schenk <g.schenk@eckelmann.de>
M: Uwe Kleine-König <kernel@pengutronix.de>
M: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
R: Pengutronix Kernel Team <kernel@pengutronix.de>
S: Supported
F: drivers/siox/*
F: include/trace/events/siox.h


@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 16
PATCHLEVEL = 17
SUBLEVEL = 0
EXTRAVERSION =
EXTRAVERSION = -rc2
NAME = Fearless Coyote
# *DOCUMENTATION*


@ -366,7 +366,7 @@ void force_signal_inject(int signal, int code, unsigned long address)
}
/* Force signals we don't understand to SIGKILL */
if (WARN_ON(signal != SIGKILL ||
if (WARN_ON(signal != SIGKILL &&
siginfo_layout(signal, code) != SIL_FAULT)) {
signal = SIGKILL;
}


@ -204,7 +204,7 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
pfn_to_nid(virt_to_pfn(lm_alias(_text))));
early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
(void *)mod_shadow_start);
@ -224,7 +224,7 @@ void __init kasan_init(void)
kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
(unsigned long)kasan_mem_to_shadow(end),
pfn_to_nid(virt_to_pfn(start)));
early_pfn_to_nid(virt_to_pfn(start)));
}
/*


@ -51,6 +51,8 @@ pci0: pci@10000000 {
ranges = <0x02000000 0 0x40000000
0x40000000 0 0x40000000>;
bus-range = <0x00 0xff>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pci0_intc 1>,
<0 0 0 2 &pci0_intc 2>,
@ -79,6 +81,8 @@ pci1: pci@12000000 {
ranges = <0x02000000 0 0x20000000
0x20000000 0 0x20000000>;
bus-range = <0x00 0xff>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pci1_intc 1>,
<0 0 0 2 &pci1_intc 2>,
@ -107,6 +111,8 @@ pci2: pci@14000000 {
ranges = <0x02000000 0 0x16000000
0x16000000 0 0x100000>;
bus-range = <0x00 0xff>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pci2_intc 1>,
<0 0 0 2 &pci2_intc 2>,


@ -307,7 +307,7 @@ static inline void iounmap(const volatile void __iomem *addr)
#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
#define war_io_reorder_wmb() wmb()
#else
#define war_io_reorder_wmb() do { } while (0)
#define war_io_reorder_wmb() barrier()
#endif
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
@ -377,6 +377,8 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
BUG(); \
} \
\
/* prevent prefetching of coherent DMA data prematurely */ \
rmb(); \
return pfx##ioswab##bwlq(__mem, __val); \
}


@ -654,6 +654,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
{
__kernel_size_t res;
#ifdef CONFIG_CPU_MICROMIPS
/* micromips memset / bzero also clobbers t7 & t8 */
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
#else
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */
if (eva_kernel_access()) {
__asm__ __volatile__(
"move\t$4, %1\n\t"
@ -663,7 +670,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
: bzero_clobbers);
} else {
might_fault();
__asm__ __volatile__(
@ -674,7 +681,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
: bzero_clobbers);
}
return res;


@ -219,7 +219,7 @@
1: PTR_ADDIU a0, 1 /* fill bytewise */
R10KCBARRIER(0(ra))
bne t1, a0, 1b
sb a1, -1(a0)
EX(sb, a1, -1(a0), .Lsmall_fixup\@)
2: jr ra /* done */
move a2, zero
@ -252,13 +252,18 @@
PTR_L t0, TI_TASK($28)
andi a2, STORMASK
LONG_L t0, THREAD_BUADDR(t0)
LONG_ADDU a2, t1
LONG_ADDU a2, a0
jr ra
LONG_SUBU a2, t0
.Llast_fixup\@:
jr ra
andi v1, a2, STORMASK
nop
.Lsmall_fixup\@:
PTR_SUBU a2, t1, a0
jr ra
PTR_ADDIU a2, 1
.endm


@ -23,7 +23,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PA11) += pci-dma.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_64BIT) += binfmt_elf32.o sys_parisc32.o signal32.o
obj-$(CONFIG_64BIT) += sys_parisc32.o signal32.o
obj-$(CONFIG_STACKTRACE)+= stacktrace.o
obj-$(CONFIG_AUDIT) += audit.o
obj64-$(CONFIG_AUDIT) += compat_audit.o


@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
/* PCI Command: 0x4 */
eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
/* Check the PCIe link is ready */
eeh_bridge_check_link(edev);


@ -553,12 +553,12 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
lbz r0,HSTATE_HWTHREAD_STATE(r13)
cmpwi r0,KVM_HWTHREAD_IN_KERNEL
beq 1f
beq 0f
li r0,KVM_HWTHREAD_IN_KERNEL
stb r0,HSTATE_HWTHREAD_STATE(r13)
/* Order setting hwthread_state vs. testing hwthread_req */
sync
lbz r0,HSTATE_HWTHREAD_REQ(r13)
0: lbz r0,HSTATE_HWTHREAD_REQ(r13)
cmpwi r0,0
beq 1f
b kvm_start_guest


@ -890,6 +890,17 @@ static void __ref init_fallback_flush(void)
return;
l1d_size = ppc64_caches.l1d.size;
/*
* If there is no d-cache-size property in the device tree, l1d_size
* could be zero. That leads to the loop in the asm wrapping around to
* 2^64-1, and then walking off the end of the fallback area and
* eventually causing a page fault which is fatal. Just default to
* something vaguely sane.
*/
if (!l1d_size)
l1d_size = (64 * 1024);
limit = min(ppc64_bolted_size(), ppc64_rma_size);
/*


@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
unsigned int *target = (unsigned int *)branch_target(src);
/* Branch within the section doesn't need translating */
if (target < alt_start || target >= alt_end) {
if (target < alt_start || target > alt_end) {
instr = translate_branch(dest, src);
if (!instr)
return 1;


@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
LOAD_INT(c), LOAD_FRAC(c),
count_active_contexts(),
atomic_read(&nr_spu_contexts),
idr_get_cursor(&task_active_pid_ns(current)->idr));
idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
return 0;
}


@ -389,6 +389,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
if (xive_pool_vps == XIVE_INVALID_VP)
return;
/* Check if pool VP already active, if it is, pull it */
if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
/* Enable the pool VP */
vp = xive_pool_vps + cpu;
pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);


@ -8,3 +8,4 @@ obj-$(CONFIG_APPLDATA_BASE) += appldata/
obj-y += net/
obj-$(CONFIG_PCI) += pci/
obj-$(CONFIG_NUMA) += numa/
obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += purgatory/


@ -47,10 +47,6 @@ config PGSTE
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y
config KEXEC
def_bool y
select KEXEC_CORE
config AUDIT_ARCH
def_bool y
@ -290,12 +286,12 @@ config MARCH_Z13
older machines.
config MARCH_Z14
bool "IBM z14"
bool "IBM z14 ZR1 and z14"
select HAVE_MARCH_Z14_FEATURES
help
Select this to enable optimizations for IBM z14 (3906 series).
The kernel will be slightly faster but will not work on older
machines.
Select this to enable optimizations for IBM z14 ZR1 and z14 (3907
and 3906 series). The kernel will be slightly faster but will not
work on older machines.
endchoice
@ -525,6 +521,26 @@ source kernel/Kconfig.preempt
source kernel/Kconfig.hz
config KEXEC
def_bool y
select KEXEC_CORE
config KEXEC_FILE
bool "kexec file based system call"
select KEXEC_CORE
select BUILD_BIN2C
depends on CRYPTO
depends on CRYPTO_SHA256
depends on CRYPTO_SHA256_S390
help
Enable the kexec file based system call. In contrast to the normal
kexec system call this system call takes file descriptors for the
kernel and initramfs as arguments.
config ARCH_HAS_KEXEC_PURGATORY
def_bool y
depends on KEXEC_FILE
config ARCH_RANDOM
def_bool y
prompt "s390 architectural random number generation API"


@ -3,12 +3,6 @@
# Makefile for the linux s390-specific parts of the memory manager.
#
COMPILE_VERSION := __linux_compile_version_id__`hostname | \
tr -c '[0-9A-Za-z]' '_'`__`date | \
tr -c '[0-9A-Za-z]' '_'`_t
ccflags-y := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
targets := image
targets += bzImage
subdir- := compressed


@ -1,3 +1,4 @@
sizes.h
vmlinux
vmlinux.lds
vmlinux.bin.full


@ -24,13 +24,13 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@ -59,10 +59,11 @@ CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_LIVEPATCH=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=256
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_PREEMPT=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@ -305,7 +306,6 @@ CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
@ -364,11 +364,11 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
@ -380,9 +380,9 @@ CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_RBD=m
CONFIG_BLK_DEV_NVME=m
CONFIG_ENCLOSURE_SERVICES=m
CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m
@ -461,6 +461,7 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@ -474,6 +475,9 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
CONFIG_DRM=y
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_INFINIBAND=m
@ -482,7 +486,9 @@ CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@ -641,6 +647,8 @@ CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@ -649,17 +657,20 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@ -707,9 +718,8 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_PKCS7_MESSAGE_PARSER=y
CONFIG_SYSTEM_TRUSTED_KEYRING=y
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_RANDOM32_SELFTEST=y


@ -1,661 +0,0 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_GCOV_KERNEL=y
CONFIG_GCOV_PROFILE_ALL=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_WBT_SQ=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
CONFIG_MINIX_SUBPARTITION=y
CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_LIVEPATCH=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PCI=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_CHSC_SCH=y
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_NET_IPIP=m
CONFIG_NET_IPGRE_DEMUX=m
CONFIG_NET_IPGRE=m
CONFIG_NET_IPGRE_BROADCAST=y
CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_HSTCP=m
CONFIG_TCP_CONG_HYBLA=m
CONFIG_TCP_CONG_SCALABLE=m
CONFIG_TCP_CONG_LP=m
CONFIG_TCP_CONG_VENO=m
CONFIG_TCP_CONG_YEAH=m
CONFIG_TCP_CONG_ILLINOIS=m
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
CONFIG_INET6_XFRM_MODE_TRANSPORT=m
CONFIG_INET6_XFRM_MODE_TUNNEL=m
CONFIG_INET6_XFRM_MODE_BEET=m
CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_SIT=m
CONFIG_IPV6_GRE=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_NETBIOS_NS=m
CONFIG_NF_CONNTRACK_SNMP=m
CONFIG_NF_CONNTRACK_PPTP=m
CONFIG_NF_CONNTRACK_SANE=m
CONFIG_NF_CONNTRACK_SIP=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
CONFIG_NETFILTER_XT_TARGET_CT=m
CONFIG_NETFILTER_XT_TARGET_DSCP=m
CONFIG_NETFILTER_XT_TARGET_HMARK=m
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
CONFIG_NETFILTER_XT_TARGET_LOG=m
CONFIG_NETFILTER_XT_TARGET_MARK=m
CONFIG_NETFILTER_XT_TARGET_NFLOG=m
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
CONFIG_NETFILTER_XT_TARGET_TEE=m
CONFIG_NETFILTER_XT_TARGET_TPROXY=m
CONFIG_NETFILTER_XT_TARGET_TRACE=m
CONFIG_NETFILTER_XT_TARGET_SECMARK=m
CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
CONFIG_NETFILTER_XT_MATCH_BPF=m
CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
CONFIG_NETFILTER_XT_MATCH_COMMENT=m
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
CONFIG_NETFILTER_XT_MATCH_CPU=m
CONFIG_NETFILTER_XT_MATCH_DCCP=m
CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
CONFIG_NETFILTER_XT_MATCH_DSCP=m
CONFIG_NETFILTER_XT_MATCH_ESP=m
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
CONFIG_NETFILTER_XT_MATCH_HELPER=m
CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
CONFIG_NETFILTER_XT_MATCH_IPVS=m
CONFIG_NETFILTER_XT_MATCH_LENGTH=m
CONFIG_NETFILTER_XT_MATCH_LIMIT=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_MARK=m
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
CONFIG_NETFILTER_XT_MATCH_NFACCT=m
CONFIG_NETFILTER_XT_MATCH_OSF=m
CONFIG_NETFILTER_XT_MATCH_OWNER=m
CONFIG_NETFILTER_XT_MATCH_POLICY=m
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
CONFIG_IP_SET=m
CONFIG_IP_SET_BITMAP_IP=m
CONFIG_IP_SET_BITMAP_IPMAC=m
CONFIG_IP_SET_BITMAP_PORT=m
CONFIG_IP_SET_HASH_IP=m
CONFIG_IP_SET_HASH_IPPORT=m
CONFIG_IP_SET_HASH_IPPORTIP=m
CONFIG_IP_SET_HASH_IPPORTNET=m
CONFIG_IP_SET_HASH_NETPORTNET=m
CONFIG_IP_SET_HASH_NET=m
CONFIG_IP_SET_HASH_NETNET=m
CONFIG_IP_SET_HASH_NETPORT=m
CONFIG_IP_SET_HASH_NETIFACE=m
CONFIG_IP_SET_LIST_SET=m
CONFIG_IP_VS=m
CONFIG_IP_VS_PROTO_TCP=y
CONFIG_IP_VS_PROTO_UDP=y
CONFIG_IP_VS_PROTO_ESP=y
CONFIG_IP_VS_PROTO_AH=y
CONFIG_IP_VS_RR=m
CONFIG_IP_VS_WRR=m
CONFIG_IP_VS_LC=m
CONFIG_IP_VS_WLC=m
CONFIG_IP_VS_LBLC=m
CONFIG_IP_VS_LBLCR=m
CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
CONFIG_IP_NF_MATCH_RPFILTER=m
CONFIG_IP_NF_MATCH_TTL=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
CONFIG_IP_NF_NAT=m
CONFIG_IP_NF_TARGET_MASQUERADE=m
CONFIG_IP_NF_MANGLE=m
CONFIG_IP_NF_TARGET_CLUSTERIP=m
CONFIG_IP_NF_TARGET_ECN=m
CONFIG_IP_NF_TARGET_TTL=m
CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=m
CONFIG_NFT_CHAIN_ROUTE_IPV6=m
CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
CONFIG_IP6_NF_MATCH_FRAG=m
CONFIG_IP6_NF_MATCH_OPTS=m
CONFIG_IP6_NF_MATCH_HL=m
CONFIG_IP6_NF_MATCH_IPV6HEADER=m
CONFIG_IP6_NF_MATCH_MH=m
CONFIG_IP6_NF_MATCH_RPFILTER=m
CONFIG_IP6_NF_MATCH_RT=m
CONFIG_IP6_NF_TARGET_HL=m
CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
CONFIG_L2TP=m
CONFIG_L2TP_DEBUGFS=m
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=m
CONFIG_L2TP_ETH=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_VLAN_8021Q_GVRP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_MULTIQ=m
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFB=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_DRR=m
CONFIG_NET_SCH_MQPRIO=m
CONFIG_NET_SCH_CHOKE=m
CONFIG_NET_SCH_QFQ=m
CONFIG_NET_SCH_CODEL=m
CONFIG_NET_SCH_FQ_CODEL=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_SCH_PLUG=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
CONFIG_CLS_U32_PERF=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=y
CONFIG_NET_CLS_BPF=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
CONFIG_GACT_PROB=y
CONFIG_NET_ACT_MIRRED=m
CONFIG_NET_ACT_IPT=m
CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_VIRTIO_BLK=y
CONFIG_ENCLOSURE_SERVICES=m
CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_ENCLOSURE=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SPI_ATTRS=m
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SAS_LIBSAS=m
CONFIG_SCSI_SRP_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_SCSI_DEBUG=m
CONFIG_ZFCP=y
CONFIG_SCSI_VIRTIO=m
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_SCSI_OSD_INITIATOR=m
CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
CONFIG_DM_MULTIPATH_QL=m
CONFIG_DM_MULTIPATH_ST=m
CONFIG_DM_DELAY=m
CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_DM_VERITY=m
CONFIG_DM_SWITCH=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_NATSEMI is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_MPPE=m
CONFIG_PPPOE=m
CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V1=m
CONFIG_QFMT_V2=m
CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
CONFIG_JOLIET=y
CONFIG_ZISOFS=y
CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_NTFS_FS=m
CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_XATTR=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=m
CONFIG_NFS_SWAP=y
CONFIG_NFSD=m
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
# CONFIG_CIFS_DEBUG is not set
CONFIG_CIFS_DFS_UPCALL=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ASCII=m
CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_GDB_SCRIPTS=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_CORDIC=m
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
CONFIG_VHOST_NET=m


@ -25,13 +25,13 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@ -45,6 +45,8 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
@ -62,6 +64,7 @@ CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@ -301,7 +304,6 @@ CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NET_SCTPPROBE=m
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
@ -359,11 +361,11 @@ CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_NET_TCPPROBE=m
CONFIG_DEVTMPFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
@ -375,8 +377,9 @@ CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_RBD=m
CONFIG_BLK_DEV_NVME=m
CONFIG_ENCLOSURE_SERVICES=m
CONFIG_GENWQE=m
CONFIG_RAID_ATTRS=m
@ -455,6 +458,7 @@ CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
@ -468,6 +472,9 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
CONFIG_DRM=y
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_INFINIBAND=m
@ -476,7 +483,9 @@ CONFIG_MLX4_INFINIBAND=m
CONFIG_MLX5_INFINIBAND=m
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@ -507,7 +516,6 @@ CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@ -592,8 +600,10 @@ CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m


@ -8,6 +8,7 @@ CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
# CONFIG_CPU_ISOLATION is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@ -23,12 +24,12 @@ CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
@ -47,6 +48,7 @@ CONFIG_LIVEPATCH=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@ -129,10 +131,13 @@ CONFIG_EQUALIZER=m
CONFIG_TUN=m
CONFIG_VIRTIO_NET=y
# CONFIG_NET_VENDOR_ALACRITECH is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
CONFIG_DEVKMEM=y
CONFIG_RAW_DRIVER=m
CONFIG_VIRTIO_BALLOON=y
@ -177,13 +182,15 @@ CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_KPROBES_SANITY_TEST=y
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
@ -213,6 +220,8 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m


@ -320,7 +320,7 @@ static void hypfs_kill_super(struct super_block *sb)
if (sb->s_root)
hypfs_delete_tree(sb->s_root);
if (sb_info->update_file)
if (sb_info && sb_info->update_file)
hypfs_remove(sb_info->update_file);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;


@ -46,4 +46,27 @@
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs) { }
struct kimage;
struct s390_load_data {
/* Pointer to the kernel buffer. Used to register cmdline etc.. */
void *kernel_buf;
/* Total size of loaded segments in memory. Used as an offset. */
size_t memsz;
/* Load address of initrd. Used to register INITRD_START in kernel. */
unsigned long initrd_load_addr;
};
int kexec_file_add_purgatory(struct kimage *image,
struct s390_load_data *data);
int kexec_file_add_initrd(struct kimage *image,
struct s390_load_data *data,
char *initrd, unsigned long initrd_len);
int *kexec_file_update_kernel(struct kimage *image,
struct s390_load_data *data);
extern const struct kexec_file_ops s390_kexec_image_ops;
extern const struct kexec_file_ops s390_kexec_elf_ops;
#endif /*_S390_KEXEC_H */


@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright IBM Corp. 2018
*
* Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
*/
#ifndef _S390_PURGATORY_H_
#define _S390_PURGATORY_H_
#ifndef __ASSEMBLY__
#include <linux/purgatory.h>
int verify_sha256_digest(void);
#endif /* __ASSEMBLY__ */
#endif /* _S390_PURGATORY_H_ */


@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* S390 version
* Copyright IBM Corp. 1999, 2010
* Copyright IBM Corp. 1999, 2017
*/
#ifndef _ASM_S390_SETUP_H
#define _ASM_S390_SETUP_H
@ -37,17 +37,31 @@
#define LPP_MAGIC _BITUL(31)
#define LPP_PID_MASK _AC(0xffffffff, UL)
/* Offsets to entry points in kernel/head.S */
#define STARTUP_NORMAL_OFFSET 0x10000
#define STARTUP_KDUMP_OFFSET 0x10010
/* Offsets to parameters in kernel/head.S */
#define IPL_DEVICE_OFFSET 0x10400
#define INITRD_START_OFFSET 0x10408
#define INITRD_SIZE_OFFSET 0x10410
#define OLDMEM_BASE_OFFSET 0x10418
#define OLDMEM_SIZE_OFFSET 0x10420
#define COMMAND_LINE_OFFSET 0x10480
#ifndef __ASSEMBLY__
#include <asm/lowcore.h>
#include <asm/types.h>
#define IPL_DEVICE (*(unsigned long *) (0x10400))
#define INITRD_START (*(unsigned long *) (0x10408))
#define INITRD_SIZE (*(unsigned long *) (0x10410))
#define OLDMEM_BASE (*(unsigned long *) (0x10418))
#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
#define COMMAND_LINE ((char *) (0x10480))
#define IPL_DEVICE (*(unsigned long *) (IPL_DEVICE_OFFSET))
#define INITRD_START (*(unsigned long *) (INITRD_START_OFFSET))
#define INITRD_SIZE (*(unsigned long *) (INITRD_SIZE_OFFSET))
#define OLDMEM_BASE (*(unsigned long *) (OLDMEM_BASE_OFFSET))
#define OLDMEM_SIZE (*(unsigned long *) (OLDMEM_SIZE_OFFSET))
#define COMMAND_LINE ((char *) (COMMAND_LINE_OFFSET))
extern int memory_end_set;
extern unsigned long memory_end;
@ -121,12 +135,12 @@ extern void (*_machine_power_off)(void);
#else /* __ASSEMBLY__ */
#define IPL_DEVICE 0x10400
#define INITRD_START 0x10408
#define INITRD_SIZE 0x10410
#define OLDMEM_BASE 0x10418
#define OLDMEM_SIZE 0x10420
#define COMMAND_LINE 0x10480
#define IPL_DEVICE (IPL_DEVICE_OFFSET)
#define INITRD_START (INITRD_START_OFFSET)
#define INITRD_SIZE (INITRD_SIZE_OFFSET)
#define OLDMEM_BASE (OLDMEM_BASE_OFFSET)
#define OLDMEM_SIZE (OLDMEM_SIZE_OFFSET)
#define COMMAND_LINE (COMMAND_LINE_OFFSET)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_S390_SETUP_H */


@ -97,22 +97,31 @@ typedef unsigned long sigset_t;
#include <asm-generic/signal-defs.h>
#ifndef __KERNEL__
/* Here we must cater to libcs that poke about in kernel headers. */
/*
* There are two system calls in regard to sigaction, sys_rt_sigaction
* and sys_sigaction. Internally the kernel uses the struct old_sigaction
* for the older sys_sigaction system call, and the kernel version of the
* struct sigaction for the newer sys_rt_sigaction.
*
* The uapi definition for struct sigaction has made a strange distinction
* between 31-bit and 64-bit in the past. For 64-bit the uapi structure
* looks like the kernel struct sigaction, but for 31-bit it used to
* look like the kernel struct old_sigaction. That practically made the
* structure unusable for either system call. To get around this problem
* the glibc always had its own definitions for the sigaction structures.
*
* The current struct sigaction uapi definition below is suitable for the
* sys_rt_sigaction system call only.
*/
struct sigaction {
union {
__sighandler_t _sa_handler;
void (*_sa_sigaction)(int, struct siginfo *, void *);
} _u;
#ifndef __s390x__ /* lovely */
sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
#else /* __s390x__ */
unsigned long sa_flags;
void (*sa_restorer)(void);
sigset_t sa_mask;
#endif /* __s390x__ */
};
#define sa_handler _u._sa_handler


@ -82,6 +82,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
obj-$(CONFIG_KEXEC_FILE) += kexec_elf.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o perf_regs.o


@ -10,6 +10,7 @@
#include <linux/kbuild.h>
#include <linux/kvm_host.h>
#include <linux/sched.h>
#include <linux/purgatory.h>
#include <asm/idle.h>
#include <asm/vdso.h>
#include <asm/pgtable.h>
@ -204,5 +205,9 @@ int main(void)
OFFSET(__GMAP_ASCE, gmap, asce);
OFFSET(__SIE_PROG0C, kvm_s390_sie_block, prog0c);
OFFSET(__SIE_PROG20, kvm_s390_sie_block, prog20);
/* kexec_sha_region */
OFFSET(__KEXEC_SHA_REGION_START, kexec_sha_region, start);
OFFSET(__KEXEC_SHA_REGION_LEN, kexec_sha_region, len);
DEFINE(__KEXEC_SHA_REGION_SIZE, sizeof(struct kexec_sha_region));
return 0;
}


@ -182,3 +182,4 @@ COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int,
COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);
COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags)


@ -0,0 +1,147 @@
// SPDX-License-Identifier: GPL-2.0
/*
* ELF loader for kexec_file_load system call.
*
* Copyright IBM Corp. 2018
*
* Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <asm/setup.h>
static int kexec_file_add_elf_kernel(struct kimage *image,
struct s390_load_data *data,
char *kernel, unsigned long kernel_len)
{
struct kexec_buf buf;
const Elf_Ehdr *ehdr;
const Elf_Phdr *phdr;
int i, ret;
ehdr = (Elf_Ehdr *)kernel;
buf.image = image;
phdr = (void *)ehdr + ehdr->e_phoff;
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
if (phdr->p_type != PT_LOAD)
continue;
buf.buffer = kernel + phdr->p_offset;
buf.bufsz = phdr->p_filesz;
buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
buf.memsz = phdr->p_memsz;
if (phdr->p_paddr == 0) {
data->kernel_buf = buf.buffer;
data->memsz += STARTUP_NORMAL_OFFSET;
buf.buffer += STARTUP_NORMAL_OFFSET;
buf.bufsz -= STARTUP_NORMAL_OFFSET;
buf.mem += STARTUP_NORMAL_OFFSET;
buf.memsz -= STARTUP_NORMAL_OFFSET;
}
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
ret = kexec_add_buffer(&buf);
if (ret)
return ret;
data->memsz += buf.memsz;
}
return 0;
}
static void *s390_elf_load(struct kimage *image,
char *kernel, unsigned long kernel_len,
char *initrd, unsigned long initrd_len,
char *cmdline, unsigned long cmdline_len)
{
struct s390_load_data data = {0};
const Elf_Ehdr *ehdr;
const Elf_Phdr *phdr;
size_t size;
int i, ret;
/* image->fops->probe already checked for valid ELF magic number. */
ehdr = (Elf_Ehdr *)kernel;
if (ehdr->e_type != ET_EXEC ||
ehdr->e_ident[EI_CLASS] != ELFCLASS64 ||
!elf_check_arch(ehdr))
return ERR_PTR(-EINVAL);
if (!ehdr->e_phnum || ehdr->e_phentsize != sizeof(Elf_Phdr))
return ERR_PTR(-EINVAL);
size = ehdr->e_ehsize + ehdr->e_phoff;
size += ehdr->e_phentsize * ehdr->e_phnum;
if (size > kernel_len)
return ERR_PTR(-EINVAL);
phdr = (void *)ehdr + ehdr->e_phoff;
size = ALIGN(size, phdr->p_align);
for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
if (phdr->p_type == PT_INTERP)
return ERR_PTR(-EINVAL);
if (phdr->p_offset > kernel_len)
return ERR_PTR(-EINVAL);
size += ALIGN(phdr->p_filesz, phdr->p_align);
}
if (size > kernel_len)
return ERR_PTR(-EINVAL);
ret = kexec_file_add_elf_kernel(image, &data, kernel, kernel_len);
if (ret)
return ERR_PTR(ret);
if (!data.memsz)
return ERR_PTR(-EINVAL);
if (initrd) {
ret = kexec_file_add_initrd(image, &data, initrd, initrd_len);
if (ret)
return ERR_PTR(ret);
}
ret = kexec_file_add_purgatory(image, &data);
if (ret)
return ERR_PTR(ret);
return kexec_file_update_kernel(image, &data);
}
static int s390_elf_probe(const char *buf, unsigned long len)
{
const Elf_Ehdr *ehdr;
if (len < sizeof(Elf_Ehdr))
return -ENOEXEC;
ehdr = (Elf_Ehdr *)buf;
/* Only check the ELF magic number here and do the proper validity
* check in the loader. Any check here that fails would send the
* erroneous ELF file to the image loader, which does not care what
* it gets, most likely causing behavior not intended by the user.
*/
if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
return -ENOEXEC;
return 0;
}
const struct kexec_file_ops s390_kexec_elf_ops = {
.probe = s390_elf_probe,
.load = s390_elf_load,
};

View File

@ -0,0 +1,76 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Image loader for kexec_file_load system call.
*
* Copyright IBM Corp. 2018
*
* Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <asm/setup.h>
static int kexec_file_add_image_kernel(struct kimage *image,
struct s390_load_data *data,
char *kernel, unsigned long kernel_len)
{
struct kexec_buf buf;
int ret;
buf.image = image;
buf.buffer = kernel + STARTUP_NORMAL_OFFSET;
buf.bufsz = kernel_len - STARTUP_NORMAL_OFFSET;
buf.mem = STARTUP_NORMAL_OFFSET;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
buf.memsz = buf.bufsz;
ret = kexec_add_buffer(&buf);
data->kernel_buf = kernel;
data->memsz += buf.memsz + STARTUP_NORMAL_OFFSET;
return ret;
}
static void *s390_image_load(struct kimage *image,
char *kernel, unsigned long kernel_len,
char *initrd, unsigned long initrd_len,
char *cmdline, unsigned long cmdline_len)
{
struct s390_load_data data = {0};
int ret;
ret = kexec_file_add_image_kernel(image, &data, kernel, kernel_len);
if (ret)
return ERR_PTR(ret);
if (initrd) {
ret = kexec_file_add_initrd(image, &data, initrd, initrd_len);
if (ret)
return ERR_PTR(ret);
}
ret = kexec_file_add_purgatory(image, &data);
if (ret)
return ERR_PTR(ret);
return kexec_file_update_kernel(image, &data);
}
static int s390_image_probe(const char *buf, unsigned long len)
{
/* Can't reliably tell if an image is valid. Therefore give the
* user whatever they ask for.
*/
return 0;
}
const struct kexec_file_ops s390_kexec_image_ops = {
.probe = s390_image_probe,
.load = s390_image_load,
};

View File

@ -0,0 +1,245 @@
// SPDX-License-Identifier: GPL-2.0
/*
* s390 code for kexec_file_load system call
*
* Copyright IBM Corp. 2018
*
* Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
*/
#include <linux/elf.h>
#include <linux/kexec.h>
#include <asm/setup.h>
const struct kexec_file_ops * const kexec_file_loaders[] = {
&s390_kexec_elf_ops,
&s390_kexec_image_ops,
NULL,
};
void *kexec_file_update_kernel(struct kimage *image,
struct s390_load_data *data)
{
unsigned long *loc;
if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE)
return ERR_PTR(-EINVAL);
if (image->cmdline_buf_len)
memcpy(data->kernel_buf + COMMAND_LINE_OFFSET,
image->cmdline_buf, image->cmdline_buf_len);
if (image->type == KEXEC_TYPE_CRASH) {
loc = (unsigned long *)(data->kernel_buf + OLDMEM_BASE_OFFSET);
*loc = crashk_res.start;
loc = (unsigned long *)(data->kernel_buf + OLDMEM_SIZE_OFFSET);
*loc = crashk_res.end - crashk_res.start + 1;
}
if (image->initrd_buf) {
loc = (unsigned long *)(data->kernel_buf + INITRD_START_OFFSET);
*loc = data->initrd_load_addr;
loc = (unsigned long *)(data->kernel_buf + INITRD_SIZE_OFFSET);
*loc = image->initrd_buf_len;
}
return NULL;
}
static int kexec_file_update_purgatory(struct kimage *image)
{
u64 entry, type;
int ret;
if (image->type == KEXEC_TYPE_CRASH) {
entry = STARTUP_KDUMP_OFFSET;
type = KEXEC_TYPE_CRASH;
} else {
entry = STARTUP_NORMAL_OFFSET;
type = KEXEC_TYPE_DEFAULT;
}
ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
sizeof(entry), false);
if (ret)
return ret;
ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
sizeof(type), false);
if (ret)
return ret;
if (image->type == KEXEC_TYPE_CRASH) {
u64 crash_size;
ret = kexec_purgatory_get_set_symbol(image, "crash_start",
&crashk_res.start,
sizeof(crashk_res.start),
false);
if (ret)
return ret;
crash_size = crashk_res.end - crashk_res.start + 1;
ret = kexec_purgatory_get_set_symbol(image, "crash_size",
&crash_size,
sizeof(crash_size),
false);
}
return ret;
}
int kexec_file_add_purgatory(struct kimage *image, struct s390_load_data *data)
{
struct kexec_buf buf;
int ret;
buf.image = image;
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
ret = kexec_load_purgatory(image, &buf);
if (ret)
return ret;
ret = kexec_file_update_purgatory(image);
return ret;
}
int kexec_file_add_initrd(struct kimage *image, struct s390_load_data *data,
char *initrd, unsigned long initrd_len)
{
struct kexec_buf buf;
int ret;
buf.image = image;
buf.buffer = initrd;
buf.bufsz = initrd_len;
data->memsz = ALIGN(data->memsz, PAGE_SIZE);
buf.mem = data->memsz;
if (image->type == KEXEC_TYPE_CRASH)
buf.mem += crashk_res.start;
buf.memsz = buf.bufsz;
data->initrd_load_addr = buf.mem;
data->memsz += buf.memsz;
ret = kexec_add_buffer(&buf);
return ret;
}
/*
* The kernel is loaded to a fixed location. Turn off kexec_locate_mem_hole
* and provide kbuf->mem by hand.
*/
int arch_kexec_walk_mem(struct kexec_buf *kbuf,
int (*func)(struct resource *, void *))
{
return 1;
}
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
Elf_Shdr *section,
const Elf_Shdr *relsec,
const Elf_Shdr *symtab)
{
Elf_Rela *relas;
int i;
relas = (void *)pi->ehdr + relsec->sh_offset;
for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
const Elf_Sym *sym; /* symbol to relocate */
unsigned long addr; /* final location after relocation */
unsigned long val; /* relocated symbol value */
void *loc; /* tmp location to modify */
sym = (void *)pi->ehdr + symtab->sh_offset;
sym += ELF64_R_SYM(relas[i].r_info);
if (sym->st_shndx == SHN_UNDEF)
return -ENOEXEC;
if (sym->st_shndx == SHN_COMMON)
return -ENOEXEC;
if (sym->st_shndx >= pi->ehdr->e_shnum &&
sym->st_shndx != SHN_ABS)
return -ENOEXEC;
loc = pi->purgatory_buf;
loc += section->sh_offset;
loc += relas[i].r_offset;
val = sym->st_value;
if (sym->st_shndx != SHN_ABS)
val += pi->sechdrs[sym->st_shndx].sh_addr;
val += relas[i].r_addend;
addr = section->sh_addr + relas[i].r_offset;
switch (ELF64_R_TYPE(relas[i].r_info)) {
case R_390_8: /* Direct 8 bit. */
*(u8 *)loc = val;
break;
case R_390_12: /* Direct 12 bit. */
*(u16 *)loc &= 0xf000;
*(u16 *)loc |= val & 0xfff;
break;
case R_390_16: /* Direct 16 bit. */
*(u16 *)loc = val;
break;
case R_390_20: /* Direct 20 bit. */
*(u32 *)loc &= 0xf00000ff;
*(u32 *)loc |= (val & 0xfff) << 16; /* DL */
*(u32 *)loc |= (val & 0xff000) >> 4; /* DH */
break;
case R_390_32: /* Direct 32 bit. */
*(u32 *)loc = val;
break;
case R_390_64: /* Direct 64 bit. */
*(u64 *)loc = val;
break;
case R_390_PC16: /* PC relative 16 bit. */
*(u16 *)loc = (val - addr);
break;
case R_390_PC16DBL: /* PC relative 16 bit shifted by 1. */
*(u16 *)loc = (val - addr) >> 1;
break;
case R_390_PC32DBL: /* PC relative 32 bit shifted by 1. */
*(u32 *)loc = (val - addr) >> 1;
break;
case R_390_PC32: /* PC relative 32 bit. */
*(u32 *)loc = (val - addr);
break;
case R_390_PC64: /* PC relative 64 bit. */
*(u64 *)loc = (val - addr);
break;
default:
break;
}
}
return 0;
}
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
unsigned long buf_len)
{
/* A kernel must be at least large enough to contain head.S. During
* load, memory in head.S will be accessed, e.g. to register the next
* command line. If the next kernel were smaller the current kernel
* would panic at load.
*
* 0x11000 = sizeof(head.S)
*/
if (buf_len < 0x11000)
return -ENOEXEC;
return kexec_image_probe_default(image, buf, buf_len);
}

View File

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <asm/nospec-branch.h>
static int __init nobp_setup_early(char *str)

View File

@ -583,6 +583,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
model = cpumcf_z13_pmu_event_attr;
break;
case 0x3906:
case 0x3907:
model = cpumcf_z14_pmu_event_attr;
break;
default:

View File

@ -821,6 +821,7 @@ static int __init setup_hwcaps(void)
strcpy(elf_platform, "z13");
break;
case 0x3906:
case 0x3907:
strcpy(elf_platform, "z14");
break;
}

View File

@ -388,3 +388,4 @@
378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage
379 common statx sys_statx compat_sys_statx
380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi
381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load
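For reference, a minimal userspace sketch of invoking the syscall wired up in this table (see kexec_file_load(2)); the file paths are illustrative and error handling is omitted:

#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Hedged sketch: ask the kernel to load a new kernel image via
 * kexec_file_load. Requires CAP_SYS_BOOT; paths are illustrative.
 */
int main(void)
{
	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
	int initrd_fd = open("/boot/initrd.img", O_RDONLY);
	const char cmdline[] = "root=/dev/sda1";

	/* cmdline_len must count the terminating NUL */
	return syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		       sizeof(cmdline), cmdline, 0UL);
}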

arch/s390/purgatory/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
kexec-purgatory.c
purgatory.ro

View File

@ -0,0 +1,37 @@
# SPDX-License-Identifier: GPL-2.0
OBJECT_FILES_NON_STANDARD := y
purgatory-y := head.o purgatory.o string.o sha256.o mem.o
targets += $(purgatory-y) purgatory.ro kexec-purgatory.c
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
$(obj)/sha256.o: $(srctree)/lib/sha256.c
$(call if_changed_rule,cc_o_c)
$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S
$(call if_changed_rule,as_o_S)
$(obj)/string.o: $(srctree)/arch/s390/lib/string.c
$(call if_changed_rule,cc_o_c)
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib
LDFLAGS_purgatory.ro += -z nodefaultlib
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
KBUILD_CFLAGS += -c -MD -Os -m64
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
CMD_BIN2C = $(objtree)/scripts/basic/bin2c
quiet_cmd_bin2c = BIN2C $@
cmd_bin2c = $(CMD_BIN2C) kexec_purgatory < $< > $@
$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
$(call if_changed,bin2c)
obj-$(CONFIG_ARCH_HAS_KEXEC_PURGATORY) += kexec-purgatory.o

arch/s390/purgatory/head.S Normal file
View File

@ -0,0 +1,279 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Purgatory setup code
*
* Copyright IBM Corp. 2018
*
* Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/sigp.h>
/* The purgatory is the code running between two kernels. Its main purpose
* is to verify that the next kernel was not corrupted after load and to
* start it.
*
* If the next kernel is a crash kernel there are some peculiarities to
* consider:
*
* First, the purgatory is called twice. Once only to verify the
* sha digest, so that if the crash kernel got corrupted the old kernel
* can try to trigger a stand-alone dumper. And once to actually load
* the crash kernel.
*
* Second, the purgatory also has to swap the crash memory region with its
* destination at address 0. As the purgatory is part of crash memory this
* requires some finesse. The tactic here is that the purgatory first copies
* itself to the end of the destination and then swaps the rest of the
* memory running from there.
*/
#define bufsz purgatory_end-stack
.macro MEMCPY dst,src,len
lgr %r0,\dst
lgr %r1,\len
lgr %r2,\src
lgr %r3,\len
20: mvcle %r0,%r2,0
jo 20b
.endm
.macro MEMSWAP dst,src,buf,len
10: cghi \len,bufsz
jh 11f
lgr %r4,\len
j 12f
11: lghi %r4,bufsz
12: MEMCPY \buf,\dst,%r4
MEMCPY \dst,\src,%r4
MEMCPY \src,\buf,%r4
agr \dst,%r4
agr \src,%r4
sgr \len,%r4
cghi \len,0
jh 10b
.endm
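The MEMSWAP macro above performs a buffered three-way copy. A hedged C equivalent of the same idea (flat pointers instead of physical addresses, plain memcpy instead of mvcle; the function name is illustrative):

#include <stddef.h>
#include <string.h>

/* Swap two equally sized memory regions through a small bounce buffer,
 * chunk by chunk, mirroring what MEMSWAP does with mvcle.
 */
static void memswap(char *a, char *b, char *buf, size_t bufsz, size_t len)
{
	while (len) {
		size_t n = len < bufsz ? len : bufsz;

		memcpy(buf, a, n);	/* a   -> buf */
		memcpy(a, b, n);	/* b   -> a   */
		memcpy(b, buf, n);	/* buf -> b   */
		a += n;
		b += n;
		len -= n;
	}
}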
.macro START_NEXT_KERNEL base
lg %r4,kernel_entry-\base(%r13)
lg %r5,load_psw_mask-\base(%r13)
ogr %r4,%r5
stg %r4,0(%r0)
xgr %r0,%r0
diag %r0,%r0,0x308
.endm
.text
.align PAGE_SIZE
ENTRY(purgatory_start)
/* The purgatory might be called after a diag308 so better set
* architecture and addressing mode.
*/
lhi %r1,1
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
sam64
larl %r5,gprregs
stmg %r6,%r15,0(%r5)
basr %r13,0
.base_crash:
/* Setup stack */
larl %r15,purgatory_end
aghi %r15,-160
/* If the next kernel is KEXEC_TYPE_CRASH the purgatory is called
* directly with a flag passed in %r2 whether the purgatory shall do
* checksum verification only (%r2 = 0 -> verification only).
*
* Check now and preserve over C function call by storing in
* %r10 with
* 1 -> checksum verification only
* 0 -> load new kernel
*/
lghi %r10,0
lg %r11,kernel_type-.base_crash(%r13)
cghi %r11,1 /* KEXEC_TYPE_CRASH */
jne .do_checksum_verification
cghi %r2,0 /* checksum verification only */
jne .do_checksum_verification
lghi %r10,1
.do_checksum_verification:
brasl %r14,verify_sha256_digest
cghi %r10,1 /* checksum verification only */
je .return_old_kernel
cghi %r2,0 /* checksum match */
jne .disabled_wait
/* If the next kernel is a crash kernel the purgatory has to swap
* the mem regions first.
*/
cghi %r11,1 /* KEXEC_TYPE_CRASH */
je .start_crash_kernel
/* start normal kernel */
START_NEXT_KERNEL .base_crash
.return_old_kernel:
lmg %r6,%r15,gprregs-.base_crash(%r13)
br %r14
.disabled_wait:
lpswe disabled_wait_psw-.base_crash(%r13)
.start_crash_kernel:
/* Location of purgatory_start in crash memory */
lgr %r8,%r13
aghi %r8,-(.base_crash-purgatory_start)
/* Destination for this code i.e. end of memory to be swapped. */
lg %r9,crash_size-.base_crash(%r13)
aghi %r9,-(purgatory_end-purgatory_start)
/* Destination in crash memory, i.e. same as r9 but in crash memory. */
lg %r10,crash_start-.base_crash(%r13)
agr %r10,%r9
/* Buffer location (in crash memory) and size. As the purgatory is
* behind the point of no return it can re-use the stack as buffer.
*/
lghi %r11,bufsz
larl %r12,stack
MEMCPY %r12,%r9,%r11 /* dst -> (crash) buf */
MEMCPY %r9,%r8,%r11 /* self -> dst */
/* Jump to new location. */
lgr %r7,%r9
aghi %r7,.jump_to_dst-purgatory_start
br %r7
.jump_to_dst:
basr %r13,0
.base_dst:
/* clear buffer */
MEMCPY %r12,%r10,%r11 /* (crash) buf -> (crash) dst */
/* Load new buffer location after jump */
larl %r7,stack
aghi %r10,stack-purgatory_start
MEMCPY %r10,%r7,%r11 /* (new) buf -> (crash) buf */
/* Now the code is set up to run from its designated location. Start
* swapping the rest of crash memory.
*
* The registers will be used as follows:
*
* %r0-%r4 reserved for macros defined above
* %r5-%r6 tmp registers
* %r7 pointer to current struct sha region
* %r8 index to iterate over all sha regions
* %r9 pointer in crash memory
* %r10 pointer in old kernel
* %r11 total size (still) to be moved
* %r12 pointer to buffer
*/
lgr %r12,%r7
lgr %r11,%r9
lghi %r10,0
lg %r9,crash_start-.base_dst(%r13)
lghi %r8,16 /* KEXEC_SEGMENTS_MAX */
larl %r7,purgatory_sha_regions
j .loop_first
/* Loop over all purgatory_sha_regions. */
.loop_next:
aghi %r8,-1
cghi %r8,0
je .loop_out
aghi %r7,__KEXEC_SHA_REGION_SIZE
.loop_first:
lg %r5,__KEXEC_SHA_REGION_START(%r7)
cghi %r5,0
je .loop_next
/* Copy [end last sha region, start current sha region) */
/* Note: kexec_sha_region->start points in crash memory */
sgr %r5,%r9
MEMCPY %r9,%r10,%r5
agr %r9,%r5
agr %r10,%r5
sgr %r11,%r5
/* Swap sha region */
lg %r6,__KEXEC_SHA_REGION_LEN(%r7)
MEMSWAP %r9,%r10,%r12,%r6
sg %r11,__KEXEC_SHA_REGION_LEN(%r7)
j .loop_next
.loop_out:
/* Copy rest of crash memory */
MEMCPY %r9,%r10,%r11
/* start crash kernel */
START_NEXT_KERNEL .base_dst
load_psw_mask:
.long 0x00080000,0x80000000
.align 8
disabled_wait_psw:
.quad 0x0002000180000000
.quad 0x0000000000000000 + .do_checksum_verification
gprregs:
.rept 10
.quad 0
.endr
purgatory_sha256_digest:
.global purgatory_sha256_digest
.rept 32 /* SHA256_DIGEST_SIZE */
.byte 0
.endr
purgatory_sha_regions:
.global purgatory_sha_regions
.rept 16 * __KEXEC_SHA_REGION_SIZE /* KEXEC_SEGMENTS_MAX */
.byte 0
.endr
kernel_entry:
.global kernel_entry
.quad 0
kernel_type:
.global kernel_type
.quad 0
crash_start:
.global crash_start
.quad 0
crash_size:
.global crash_size
.quad 0
.align PAGE_SIZE
stack:
/* The buffer to move this code must be as big as the code. */
.skip stack-purgatory_start
.align PAGE_SIZE
purgatory_end:

View File

@ -0,0 +1,42 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Purgatory code running between two kernels.
*
* Copyright IBM Corp. 2018
*
* Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
*/
#include <linux/kexec.h>
#include <linux/sha256.h>
#include <linux/string.h>
#include <asm/purgatory.h>
struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX];
u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE];
u64 kernel_entry;
u64 kernel_type;
u64 crash_start;
u64 crash_size;
int verify_sha256_digest(void)
{
struct kexec_sha_region *ptr, *end;
u8 digest[SHA256_DIGEST_SIZE];
struct sha256_state sctx;
sha256_init(&sctx);
end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
for (ptr = purgatory_sha_regions; ptr < end; ptr++)
sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
sha256_final(&sctx, digest);
if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)))
return 1;
return 0;
}

View File

@ -3028,10 +3028,27 @@ static struct intel_uncore_type bdx_uncore_cbox = {
.format_group = &hswep_uncore_cbox_format_group,
};
static struct intel_uncore_type bdx_uncore_sbox = {
.name = "sbox",
.num_counters = 4,
.num_boxes = 4,
.perf_ctr_bits = 48,
.event_ctl = HSWEP_S0_MSR_PMON_CTL0,
.perf_ctr = HSWEP_S0_MSR_PMON_CTR0,
.event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
.box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
.msr_offset = HSWEP_SBOX_MSR_OFFSET,
.ops = &hswep_uncore_sbox_msr_ops,
.format_group = &hswep_uncore_sbox_format_group,
};
#define BDX_MSR_UNCORE_SBOX 3
static struct intel_uncore_type *bdx_msr_uncores[] = {
&bdx_uncore_ubox,
&bdx_uncore_cbox,
&hswep_uncore_pcu,
&bdx_uncore_sbox,
NULL,
};
@ -3043,10 +3060,25 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
void bdx_uncore_cpu_init(void)
{
int pkg = topology_phys_to_logical_pkg(0);
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
/* BDX-DE doesn't have SBOX */
if (boot_cpu_data.x86_model == 86) {
uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
/* Detect systems with no SBOXes */
} else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
struct pci_dev *pdev;
u32 capid4;
pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
pci_read_config_dword(pdev, 0x94, &capid4);
if (((capid4 >> 6) & 0x3) == 0)
bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
}
hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
@ -3264,6 +3296,11 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2),
},
{ /* PCU.3 (for Capability registers) */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
HSWEP_PCI_PCU_3),
},
{ /* end: all zeroes */ }
};

View File

@ -136,7 +136,6 @@
#endif
#ifndef __ASSEMBLY__
#ifndef __BPF__
/*
* This output constraint should be used for any inline asm which has a "call"
* instruction. Otherwise the asm may be inserted before the frame pointer
@ -146,6 +145,5 @@
register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
#endif
#endif
#endif /* _ASM_X86_ASM_H */

View File

@ -1013,6 +1013,7 @@ struct kvm_x86_ops {
bool (*has_wbinvd_exit)(void);
u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);

View File

@ -749,13 +749,11 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
extern void early_trap_init(void);
void early_trap_pf_init(void);
/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;
extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);

View File

@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
apic_id = processor->local_apic_id;
enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
/* Ignore invalid ID */
if (apic_id == 0xffffffff)
return 0;
/*
* We need to register disabled CPU as well to permit
* counting disabled CPUs. This allows us to size

View File

@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
* little bit simple
*/
efi_map_sz = efi_get_runtime_map_size();
efi_map_sz = ALIGN(efi_map_sz, 16);
params_cmdline_sz = sizeof(struct boot_params) + cmdline_len +
MAX_ELFCOREHDR_STR_LEN;
params_cmdline_sz = ALIGN(params_cmdline_sz, 16);
kbuf.bufsz = params_cmdline_sz + efi_map_sz +
kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) +
sizeof(struct setup_data) +
sizeof(struct efi_setup_data);
@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
if (!params)
return ERR_PTR(-ENOMEM);
efi_map_offset = params_cmdline_sz;
efi_setup_data_offset = efi_map_offset + efi_map_sz;
efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16);
/* Copy setup header onto bootparams. Documentation/x86/boot.txt */
setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset;

View File

@ -166,7 +166,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
*/
pte_prot = __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL);
/* Filter out unsupported __PAGE_KERNEL* bits: */
pgprot_val(pte_prot) |= __supported_pte_mask;
pgprot_val(pte_prot) &= __supported_pte_mask;
pte = pfn_pte(pfn, pte_prot);
set_pte_at(mm, va, ptep, pte);
pte_unmap_unlock(ptep, ptl);

View File

@ -1,90 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Fallback functions when the main IOMMU code is not compiled in. This
code is roughly equivalent to i386. */
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/iommu.h>
#include <asm/dma.h>
#define NOMMU_MAPPING_ERROR 0
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
if (hwdev && !dma_capable(hwdev, bus, size)) {
if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
printk(KERN_ERR
"nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
name, (long long)bus, size,
(long long)*hwdev->dma_mask);
return 0;
}
return 1;
}
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
unsigned long attrs)
{
dma_addr_t bus = phys_to_dma(dev, page_to_phys(page)) + offset;
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
return NOMMU_MAPPING_ERROR;
return bus;
}
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list
* elements are each tagged with the appropriate dma address
* and length. They are obtained via sg_dma_{address,length}(SG).
*
* NOTE: An implementation may be able to use a smaller number of
* DMA address/length pairs than there are SG table elements.
* (for example via virtual mapping capabilities)
* The routine returns the number of addr/length pairs actually
* used, at most nents.
*
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
unsigned long attrs)
{
struct scatterlist *s;
int i;
WARN_ON(nents == 0 || sg[0].length == 0);
for_each_sg(sg, s, nents, i) {
BUG_ON(!sg_page(s));
s->dma_address = sg_phys(s);
if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
return 0;
s->dma_length = s->length;
}
return nents;
}
static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == NOMMU_MAPPING_ERROR;
}
const struct dma_map_ops nommu_dma_ops = {
.alloc = dma_generic_alloc_coherent,
.free = dma_generic_free_coherent,
.map_sg = nommu_map_sg,
.map_page = nommu_map_page,
.is_phys = 1,
.mapping_error = nommu_mapping_error,
.dma_supported = x86_dma_supported,
};

View File

@ -77,6 +77,8 @@
#include <asm/i8259.h>
#include <asm/misc.h>
#include <asm/qspinlock.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@ -390,15 +392,47 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
return false;
}
/*
* Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
*
* These are Intel CPUs that enumerate an LLC that is shared by
* multiple NUMA nodes. The LLC on these systems is shared for
* off-package data access but private to the NUMA node (half
* of the package) for on-package access.
*
* CPUID (the source of the information about the LLC) can only
* enumerate the cache as being shared *or* unshared, but not
* this particular configuration. The CPU in this case enumerates
* the cache to be shared across the entire package (spanning both
* NUMA nodes).
*/
static const struct x86_cpu_id snc_cpu[] = {
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X },
{}
};
static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
return topology_sane(c, o, "llc");
/* Do not match if we do not have a valid APICID for cpu: */
if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
return false;
return false;
/* Do not match if LLC id does not match: */
if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
return false;
/*
* Allow the SNC topology without warning. Return of false
* means 'c' does not share the LLC of 'o'. This will be
* reflected to userspace.
*/
if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
return false;
return topology_sane(c, o, "llc");
}
/*
@ -456,7 +490,8 @@ static struct sched_domain_topology_level x86_topology[] = {
/*
* Set if a package/die has multiple NUMA nodes inside.
* AMD Magny-Cours and Intel Cluster-on-Die have this.
* AMD Magny-Cours, Intel Cluster-on-Die, and Intel
* Sub-NUMA Clustering have this.
*/
static bool x86_has_numa_in_package;

View File

@ -317,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
hpet2 -= hpet1;
tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
do_div(tmp, 1000000);
do_div(deltatsc, tmp);
deltatsc = div64_u64(deltatsc, tmp);
return (unsigned long) deltatsc;
}

View File

@ -1423,12 +1423,23 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
seg->base = 0;
}
static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (is_guest_mode(vcpu))
return svm->nested.hsave->control.tsc_offset;
return vcpu->arch.tsc_offset;
}
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 g_tsc_offset = 0;
if (is_guest_mode(vcpu)) {
/* Write L1's TSC offset. */
g_tsc_offset = svm->vmcb->control.tsc_offset -
svm->nested.hsave->control.tsc_offset;
svm->nested.hsave->control.tsc_offset = offset;
@ -3322,6 +3333,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
/* Restore the original control entries */
copy_vmcb_control_area(vmcb, hsave);
svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
@ -3482,10 +3494,12 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
/* We don't want to see VMMCALLs from a nested guest */
clr_intercept(svm, INTERCEPT_VMMCALL);
svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
svm->vmcb->control.int_state = nested_vmcb->control.int_state;
svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
@ -4035,12 +4049,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
struct vcpu_svm *svm = to_svm(vcpu);
switch (msr_info->index) {
case MSR_IA32_TSC: {
msr_info->data = svm->vmcb->control.tsc_offset +
kvm_scale_tsc(vcpu, rdtsc());
break;
}
case MSR_STAR:
msr_info->data = svm->vmcb->save.star;
break;
@ -4193,9 +4201,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
svm->vmcb->save.g_pat = data;
mark_dirty(svm->vmcb, VMCB_NPT);
break;
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr);
break;
case MSR_IA32_SPEC_CTRL:
if (!msr->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
@ -5265,9 +5270,8 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
}
if (!ret && svm) {
trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
host_irq, e->gsi,
vcpu_info.vector,
trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
e->gsi, vcpu_info.vector,
vcpu_info.pi_desc_addr, set);
}
@ -7102,6 +7106,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.has_wbinvd_exit = svm_has_wbinvd_exit,
.read_l1_tsc_offset = svm_read_l1_tsc_offset,
.write_tsc_offset = svm_write_tsc_offset,
.set_tdp_cr3 = set_tdp_cr3,

View File

@ -2880,18 +2880,15 @@ static void setup_msrs(struct vcpu_vmx *vmx)
vmx_update_msr_bitmap(&vmx->vcpu);
}
/*
* reads and returns guest's timestamp counter "register"
* guest_tsc = (host_tsc * tsc multiplier) >> 48 + tsc_offset
* -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
*/
static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
{
u64 host_tsc, tsc_offset;
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
host_tsc = rdtsc();
tsc_offset = vmcs_read64(TSC_OFFSET);
return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
if (is_guest_mode(vcpu) &&
(vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
return vcpu->arch.tsc_offset;
}
/*
@ -3524,9 +3521,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
#endif
case MSR_EFER:
return kvm_get_msr_common(vcpu, msr_info);
case MSR_IA32_TSC:
msr_info->data = guest_read_tsc(vcpu);
break;
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
@ -3646,9 +3640,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
vmcs_write64(GUEST_BNDCFGS, data);
break;
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr_info);
break;
case MSR_IA32_SPEC_CTRL:
if (!msr_info->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_IBRS) &&
@ -10608,6 +10599,16 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
return true;
}
static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
!page_address_valid(vcpu, vmcs12->apic_access_addr))
return -EINVAL;
else
return 0;
}
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
@ -11176,11 +11177,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
}
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vmcs_write64(TSC_OFFSET,
vcpu->arch.tsc_offset + vmcs12->tsc_offset);
else
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
@ -11299,6 +11297,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
if (nested_vmx_check_apic_access_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
if (nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
@ -11420,6 +11421,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 msr_entry_idx;
u32 exit_qual;
int r;
enter_guest_mode(vcpu);
@ -11429,26 +11431,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
vmx_segment_cache_clear(vmx);
if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual)) {
leave_guest_mode(vcpu);
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, exit_qual);
return 1;
}
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vcpu->arch.tsc_offset += vmcs12->tsc_offset;
r = EXIT_REASON_INVALID_STATE;
if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &exit_qual))
goto fail;
nested_get_vmcs12_pages(vcpu, vmcs12);
r = EXIT_REASON_MSR_LOAD_FAIL;
msr_entry_idx = nested_vmx_load_msr(vcpu,
vmcs12->vm_entry_msr_load_addr,
vmcs12->vm_entry_msr_load_count);
if (msr_entry_idx) {
leave_guest_mode(vcpu);
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
return 1;
}
if (msr_entry_idx)
goto fail;
/*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@ -11457,6 +11454,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
* the success flag) when L2 exits (see nested_vmx_vmexit()).
*/
return 0;
fail:
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
leave_guest_mode(vcpu);
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
return 1;
}
/*
@ -12028,6 +12033,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
leave_guest_mode(vcpu);
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
if (likely(!vmx->fail)) {
if (exit_reason == -1)
sync_vmcs12(vcpu, vmcs12);
@ -12224,10 +12232,16 @@ static inline int u64_shl_div_u64(u64 a, unsigned int shift,
static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 tscl = rdtsc();
u64 guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
u64 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
struct vcpu_vmx *vmx;
u64 tscl, guest_tscl, delta_tsc;
if (kvm_mwait_in_guest(vcpu->kvm))
return -EOPNOTSUPP;
vmx = to_vmx(vcpu);
tscl = rdtsc();
guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl;
/* Convert to host delta tsc if tsc scaling is enabled */
if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
@ -12533,7 +12547,7 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
vcpu_info.vector = irq.vector;
trace_kvm_pi_irte_update(vcpu->vcpu_id, host_irq, e->gsi,
trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
vcpu_info.vector, vcpu_info.pi_desc_addr, set);
if (set)
@ -12712,6 +12726,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
.read_l1_tsc_offset = vmx_read_l1_tsc_offset,
.write_tsc_offset = vmx_write_tsc_offset,
.set_tdp_cr3 = vmx_set_cr3,

View File

@ -1490,7 +1490,7 @@ static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
{
u64 curr_offset = vcpu->arch.tsc_offset;
u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
}
@ -1532,7 +1532,9 @@ static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
return vcpu->arch.tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
return tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
}
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
@ -2362,6 +2364,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
vcpu->arch.smbase = data;
break;
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr_info);
break;
case MSR_SMI_COUNT:
if (!msr_info->host_initiated)
return 1;
@ -2605,6 +2610,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_UCODE_REV:
msr_info->data = vcpu->arch.microcode_version;
break;
case MSR_IA32_TSC:
msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
break;
case MSR_MTRRcap:
case 0x200 ... 0x2ff:
return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
@ -2819,7 +2827,8 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
static inline bool kvm_can_mwait_in_guest(void)
{
return boot_cpu_has(X86_FEATURE_MWAIT) &&
!boot_cpu_has_bug(X86_BUG_MONITOR);
!boot_cpu_has_bug(X86_BUG_MONITOR) &&
boot_cpu_has(X86_FEATURE_ARAT);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)

View File

@ -18,6 +18,7 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <asm/pgtable.h>
@ -334,16 +335,16 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
pgprotval_t eff_in, unsigned long P)
{
int i;
pte_t *start;
pte_t *pte;
pgprotval_t prot, eff;
start = (pte_t *)pmd_page_vaddr(addr);
for (i = 0; i < PTRS_PER_PTE; i++) {
prot = pte_flags(*start);
eff = effective_prot(eff_in, prot);
st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
pte = pte_offset_map(&addr, st->current_address);
prot = pte_flags(*pte);
eff = effective_prot(eff_in, prot);
note_page(m, st, __pgprot(prot), eff, 5);
start++;
pte_unmap(pte);
}
}
#ifdef CONFIG_KASAN

View File

@ -98,7 +98,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd)
set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
} else {
/* No p4d for 4-level paging: point the pgd to the pud page table */
pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot));
pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot));
set_pgd(pgd + pgd_index(restore_jump_address), new_pgd);
}

View File

@ -671,7 +671,7 @@ static void ia_tx_poll (IADEV *iadev) {
if ((vcc->pop) && (skb1->len != 0))
{
vcc->pop(vcc, skb1);
IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
(long)skb1);)
}
else
@ -1665,7 +1665,7 @@ static void tx_intr(struct atm_dev *dev)
status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
if (status & TRANSMIT_DONE){
IF_EVENT(printk("Tansmit Done Intr logic run\n");)
IF_EVENT(printk("Transmit Done Intr logic run\n");)
spin_lock_irqsave(&iadev->tx_lock, flags);
ia_tx_poll(iadev);
spin_unlock_irqrestore(&iadev->tx_lock, flags);

View File

@ -732,6 +732,7 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
*/
enum {
Opt_queue_depth,
Opt_lock_timeout,
Opt_last_int,
/* int args above */
Opt_last_string,
@ -740,11 +741,13 @@ enum {
Opt_read_write,
Opt_lock_on_read,
Opt_exclusive,
Opt_notrim,
Opt_err
};
static match_table_t rbd_opts_tokens = {
{Opt_queue_depth, "queue_depth=%d"},
{Opt_lock_timeout, "lock_timeout=%d"},
/* int args above */
/* string args above */
{Opt_read_only, "read_only"},
@ -753,20 +756,25 @@ static match_table_t rbd_opts_tokens = {
{Opt_read_write, "rw"}, /* Alternate spelling */
{Opt_lock_on_read, "lock_on_read"},
{Opt_exclusive, "exclusive"},
{Opt_notrim, "notrim"},
{Opt_err, NULL}
};
struct rbd_options {
int queue_depth;
unsigned long lock_timeout;
bool read_only;
bool lock_on_read;
bool exclusive;
bool trim;
};
#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
#define RBD_READ_ONLY_DEFAULT false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT false
#define RBD_TRIM_DEFAULT true
static int parse_rbd_opts_token(char *c, void *private)
{
@ -796,6 +804,14 @@ static int parse_rbd_opts_token(char *c, void *private)
}
rbd_opts->queue_depth = intval;
break;
case Opt_lock_timeout:
/* 0 is "wait forever" (i.e. infinite timeout) */
if (intval < 0 || intval > INT_MAX / 1000) {
pr_err("lock_timeout out of range\n");
return -EINVAL;
}
rbd_opts->lock_timeout = msecs_to_jiffies(intval * 1000);
break;
case Opt_read_only:
rbd_opts->read_only = true;
break;
@ -808,6 +824,9 @@ static int parse_rbd_opts_token(char *c, void *private)
case Opt_exclusive:
rbd_opts->exclusive = true;
break;
case Opt_notrim:
rbd_opts->trim = false;
break;
default:
/* libceph prints "bad option" msg */
return -EINVAL;
@ -1392,7 +1411,7 @@ static bool rbd_img_is_write(struct rbd_img_request *img_req)
case OBJ_OP_DISCARD:
return true;
default:
rbd_assert(0);
BUG();
}
}
@ -2466,7 +2485,7 @@ static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
}
return false;
default:
rbd_assert(0);
BUG();
}
}
@ -2494,7 +2513,7 @@ static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
}
return false;
default:
rbd_assert(0);
BUG();
}
}
@ -3533,9 +3552,22 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
/*
* lock_rwsem must be held for read
*/
static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
{
DEFINE_WAIT(wait);
unsigned long timeout;
int ret = 0;
if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
return -EBLACKLISTED;
if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
return 0;
if (!may_acquire) {
rbd_warn(rbd_dev, "exclusive lock required");
return -EROFS;
}
do {
/*
@ -3547,12 +3579,22 @@ static void rbd_wait_state_locked(struct rbd_device *rbd_dev)
prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
TASK_UNINTERRUPTIBLE);
up_read(&rbd_dev->lock_rwsem);
schedule();
timeout = schedule_timeout(ceph_timeout_jiffies(
rbd_dev->opts->lock_timeout));
down_read(&rbd_dev->lock_rwsem);
} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
!test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags));
if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
ret = -EBLACKLISTED;
break;
}
if (!timeout) {
rbd_warn(rbd_dev, "timed out waiting for lock");
ret = -ETIMEDOUT;
break;
}
} while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
finish_wait(&rbd_dev->lock_waitq, &wait);
return ret;
}
static void rbd_queue_workfn(struct work_struct *work)
@ -3638,19 +3680,10 @@ static void rbd_queue_workfn(struct work_struct *work)
(op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
if (must_be_locked) {
down_read(&rbd_dev->lock_rwsem);
if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED &&
!test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
if (rbd_dev->opts->exclusive) {
rbd_warn(rbd_dev, "exclusive lock required");
result = -EROFS;
goto err_unlock;
}
rbd_wait_state_locked(rbd_dev);
}
if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
result = -EBLACKLISTED;
result = rbd_wait_state_locked(rbd_dev,
!rbd_dev->opts->exclusive);
if (result)
goto err_unlock;
}
}
img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
@ -3902,7 +3935,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
{
struct gendisk *disk;
struct request_queue *q;
u64 segment_size;
unsigned int objset_bytes =
rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
int err;
/* create gendisk info */
@ -3942,20 +3976,19 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
/* set io sizes to object size */
segment_size = rbd_obj_bytes(&rbd_dev->header);
blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
q->limits.max_sectors = queue_max_hw_sectors(q);
blk_queue_max_segments(q, USHRT_MAX);
blk_queue_max_segment_size(q, UINT_MAX);
blk_queue_io_min(q, segment_size);
blk_queue_io_opt(q, segment_size);
blk_queue_io_min(q, objset_bytes);
blk_queue_io_opt(q, objset_bytes);
/* enable the discard support */
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
q->limits.discard_granularity = segment_size;
blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
if (rbd_dev->opts->trim) {
blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
q->limits.discard_granularity = objset_bytes;
blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
}
if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
@ -5179,8 +5212,10 @@ static int rbd_add_parse_args(const char *buf,
rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
rbd_opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
rbd_opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
rbd_opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
rbd_opts->trim = RBD_TRIM_DEFAULT;
copts = ceph_parse_options(options, mon_addrs,
mon_addrs + mon_addrs_size - 1,
@ -5216,6 +5251,8 @@ static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
{
int ret;
if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
return -EINVAL;
@ -5223,9 +5260,9 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
/* FIXME: "rbd map --exclusive" should be interruptible */
down_read(&rbd_dev->lock_rwsem);
rbd_wait_state_locked(rbd_dev);
ret = rbd_wait_state_locked(rbd_dev, true);
up_read(&rbd_dev->lock_rwsem);
if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
if (ret) {
rbd_warn(rbd_dev, "failed to acquire exclusive lock");
return -EROFS;
}

View File

@ -427,8 +427,9 @@ struct crng_state primary_crng = {
* its value (from 0->1->2).
*/
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 0))
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
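The crng_ready() change above tightens what counts as "ready". A hedged summary of the crng_init state machine as this patch leaves it (the labels are illustrative, not kernel identifiers):

/*
 * crng_init == 0: empty; crng_fast_load()/crng_slow_load() may still
 *                 mix in early material.
 * crng_init == 1: fast-loaded with CRNG_INIT_CNT_THRESH bytes, but not
 *                 yet seeded from trusted entropy.
 * crng_init == 2: fully initialized by crng_reseed(); only now does
 *                 crng_ready() return true.
 */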
static void _extract_crng(struct crng_state *crng,
__u32 out[CHACHA20_BLOCK_WORDS]);
@ -787,6 +788,36 @@ static void crng_initialize(struct crng_state *crng)
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
#ifdef CONFIG_NUMA
static void numa_crng_init(void)
{
int i;
struct crng_state *crng;
struct crng_state **pool;
pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
for_each_online_node(i) {
crng = kmalloc_node(sizeof(struct crng_state),
GFP_KERNEL | __GFP_NOFAIL, i);
spin_lock_init(&crng->lock);
crng_initialize(crng);
pool[i] = crng;
}
mb();
if (cmpxchg(&crng_node_pool, NULL, pool)) {
for_each_node(i)
kfree(pool[i]);
kfree(pool);
}
}
#else
static void numa_crng_init(void) {}
#endif
/*
* crng_fast_load() can be called by code in the interrupt service
* path. So we can't afford to dilly-dally.
*/
static int crng_fast_load(const char *cp, size_t len)
{
unsigned long flags;
@ -794,7 +825,7 @@ static int crng_fast_load(const char *cp, size_t len)
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
if (crng_ready()) {
if (crng_init != 0) {
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 0;
}
@ -813,6 +844,51 @@ static int crng_fast_load(const char *cp, size_t len)
return 1;
}
/*
* crng_slow_load() is called by add_device_randomness, which has two
* attributes. (1) We can't trust that the buffer passed to it is
* guaranteed to be unpredictable (so it might not have any entropy at
* all), and (2) it doesn't have the performance constraints of
* crng_fast_load().
*
* So we do something more comprehensive which is guaranteed to touch
* all of the primary_crng's state, and which uses an LFSR with a
* period of 255 as part of the mixing algorithm. Finally, we do
* *not* advance crng_init_cnt since the buffer we get may be something
* like a fixed DMI table (for example), which might very well be
* unique to the machine, but is otherwise unvarying.
*/
static int crng_slow_load(const char *cp, size_t len)
{
unsigned long flags;
static unsigned char lfsr = 1;
unsigned char tmp;
unsigned i, max = CHACHA20_KEY_SIZE;
const char *src_buf = cp;
char *dest_buf = (char *)&primary_crng.state[4];
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
if (crng_init != 0) {
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 0;
}
if (len > max)
max = len;
for (i = 0; i < max; i++) {
tmp = lfsr;
lfsr >>= 1;
if (tmp & 1)
lfsr ^= 0xE1;
tmp = dest_buf[i % CHACHA20_KEY_SIZE];
dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
lfsr += (tmp << 3) | (tmp >> 5);
}
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 1;
}
static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
unsigned long flags;
@ -831,7 +907,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
_crng_backtrack_protect(&primary_crng, buf.block,
CHACHA20_KEY_SIZE);
}
spin_lock_irqsave(&primary_crng.lock, flags);
spin_lock_irqsave(&crng->lock, flags);
for (i = 0; i < 8; i++) {
unsigned long rv;
if (!arch_get_random_seed_long(&rv) &&
@ -841,9 +917,10 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
memzero_explicit(&buf, sizeof(buf));
crng->init_time = jiffies;
spin_unlock_irqrestore(&primary_crng.lock, flags);
spin_unlock_irqrestore(&crng->lock, flags);
if (crng == &primary_crng && crng_init < 2) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
@ -856,8 +933,9 @@ static void _extract_crng(struct crng_state *crng,
{
unsigned long v, flags;
if (crng_init > 1 &&
time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
if (crng_ready() &&
(time_after(crng_global_init_time, crng->init_time) ||
time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
spin_lock_irqsave(&crng->lock, flags);
if (arch_get_random_long(&v))
@ -981,10 +1059,8 @@ void add_device_randomness(const void *buf, unsigned int size)
unsigned long time = random_get_entropy() ^ jiffies;
unsigned long flags;
if (!crng_ready()) {
crng_fast_load(buf, size);
return;
}
if (!crng_ready() && size)
crng_slow_load(buf, size);
trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
@ -1139,7 +1215,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
if (!crng_ready()) {
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
crng_fast_load((char *) fast_pool->pool,
sizeof(fast_pool->pool))) {
@ -1680,28 +1756,10 @@ static void init_std_data(struct entropy_store *r)
*/
static int rand_initialize(void)
{
#ifdef CONFIG_NUMA
int i;
struct crng_state *crng;
struct crng_state **pool;
#endif
init_std_data(&input_pool);
init_std_data(&blocking_pool);
crng_initialize(&primary_crng);
#ifdef CONFIG_NUMA
pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
for_each_online_node(i) {
crng = kmalloc_node(sizeof(struct crng_state),
GFP_KERNEL | __GFP_NOFAIL, i);
spin_lock_init(&crng->lock);
crng_initialize(crng);
pool[i] = crng;
}
mb();
crng_node_pool = pool;
#endif
crng_global_init_time = jiffies;
return 0;
}
early_initcall(rand_initialize);
@ -1875,6 +1933,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
input_pool.entropy_count = 0;
blocking_pool.entropy_count = 0;
return 0;
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (crng_init < 2)
return -ENODATA;
crng_reseed(&primary_crng, NULL);
crng_global_init_time = jiffies - 1;
return 0;
default:
return -EINVAL;
}
@ -2212,7 +2278,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
{
struct entropy_store *poolp = &input_pool;
if (!crng_ready()) {
if (unlikely(crng_init == 0)) {
crng_fast_load(buffer, count);
return;
}

View File

@ -133,6 +133,14 @@ config VT8500_TIMER
help
Enables support for the VT8500 driver.
config NPCM7XX_TIMER
bool "NPCM7xx timer driver" if COMPILE_TEST
depends on HAS_IOMEM
select CLKSRC_MMIO
help
Enable the 24-bit TIMER0 and TIMER1 counters in the NPCM7xx architecture,
where TIMER0 serves as clockevent and TIMER1 serves as clocksource.
config CADENCE_TTC_TIMER
bool "Cadence TTC timer driver" if COMPILE_TEST
depends on COMMON_CLK

View File

@ -56,6 +56,7 @@ obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
obj-$(CONFIG_OWL_TIMER) += owl-timer.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o

View File

@ -17,9 +17,14 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
#define TPM_PARAM 0x4
#define TPM_PARAM_WIDTH_SHIFT 16
#define TPM_PARAM_WIDTH_MASK (0xff << 16)
#define TPM_SC 0x10
#define TPM_SC_CMOD_INC_PER_CNT (0x1 << 3)
#define TPM_SC_CMOD_DIV_DEFAULT 0x3
#define TPM_SC_CMOD_DIV_MAX 0x7
#define TPM_SC_TOF_MASK (0x1 << 7)
#define TPM_CNT 0x14
#define TPM_MOD 0x18
#define TPM_STATUS 0x1c
@ -29,8 +34,11 @@
#define TPM_C0SC_MODE_SHIFT 2
#define TPM_C0SC_MODE_MASK 0x3c
#define TPM_C0SC_MODE_SW_COMPARE 0x4
#define TPM_C0SC_CHF_MASK (0x1 << 7)
#define TPM_C0V 0x24
static int counter_width;
static int rating;
static void __iomem *timer_base;
static struct clock_event_device clockevent_tpm;
@ -83,10 +91,11 @@ static int __init tpm_clocksource_init(unsigned long rate)
tpm_delay_timer.freq = rate;
register_current_timer_delay(&tpm_delay_timer);
sched_clock_register(tpm_read_sched_clock, 32, rate);
sched_clock_register(tpm_read_sched_clock, counter_width, rate);
return clocksource_mmio_init(timer_base + TPM_CNT, "imx-tpm",
rate, 200, 32, clocksource_mmio_readl_up);
rate, rating, counter_width,
clocksource_mmio_readl_up);
}
static int tpm_set_next_event(unsigned long delta,
@ -105,7 +114,7 @@ static int tpm_set_next_event(unsigned long delta,
* of writing CNT registers which may cause the min_delta event got
* missed, so we need add a ETIME check here in case it happened.
*/
return (int)((next - now) <= 0) ? -ETIME : 0;
return (int)(next - now) <= 0 ? -ETIME : 0;
}
static int tpm_set_state_oneshot(struct clock_event_device *evt)
@ -139,7 +148,6 @@ static struct clock_event_device clockevent_tpm = {
.set_state_oneshot = tpm_set_state_oneshot,
.set_next_event = tpm_set_next_event,
.set_state_shutdown = tpm_set_state_shutdown,
.rating = 200,
};
static int __init tpm_clockevent_init(unsigned long rate, int irq)
@ -149,10 +157,11 @@ static int __init tpm_clockevent_init(unsigned long rate, int irq)
ret = request_irq(irq, tpm_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
"i.MX7ULP TPM Timer", &clockevent_tpm);
clockevent_tpm.rating = rating;
clockevent_tpm.cpumask = cpumask_of(0);
clockevent_tpm.irq = irq;
clockevents_config_and_register(&clockevent_tpm,
rate, 300, 0xfffffffe);
clockevents_config_and_register(&clockevent_tpm, rate, 300,
GENMASK(counter_width - 1, 1));
return ret;
}
@ -179,7 +188,7 @@ static int __init tpm_timer_init(struct device_node *np)
ipg = of_clk_get_by_name(np, "ipg");
per = of_clk_get_by_name(np, "per");
if (IS_ERR(ipg) || IS_ERR(per)) {
pr_err("tpm: failed to get igp or per clk\n");
pr_err("tpm: failed to get ipg or per clk\n");
ret = -ENODEV;
goto err_clk_get;
}
@ -197,6 +206,11 @@ static int __init tpm_timer_init(struct device_node *np)
goto err_per_clk_enable;
}
counter_width = (readl(timer_base + TPM_PARAM) & TPM_PARAM_WIDTH_MASK)
>> TPM_PARAM_WIDTH_SHIFT;
/* use rating 200 for 32-bit counter and 150 for 16-bit counter */
rating = counter_width == 0x20 ? 200 : 150;
/*
* Initialize tpm module to a known state
* 1) Counter disabled
@ -205,16 +219,25 @@ static int __init tpm_timer_init(struct device_node *np)
* 4) Channel0 disabled
* 5) DMA transfers disabled
*/
/* make sure counter is disabled */
writel(0, timer_base + TPM_SC);
/* TOF is W1C */
writel(TPM_SC_TOF_MASK, timer_base + TPM_SC);
writel(0, timer_base + TPM_CNT);
writel(0, timer_base + TPM_C0SC);
/* CHF is W1C */
writel(TPM_C0SC_CHF_MASK, timer_base + TPM_C0SC);
/* increase per cnt, div 8 by default */
writel(TPM_SC_CMOD_INC_PER_CNT | TPM_SC_CMOD_DIV_DEFAULT,
/*
* increase per cnt,
* div 8 for 32-bit counter and div 128 for 16-bit counter
*/
writel(TPM_SC_CMOD_INC_PER_CNT |
(counter_width == 0x20 ?
TPM_SC_CMOD_DIV_DEFAULT : TPM_SC_CMOD_DIV_MAX),
timer_base + TPM_SC);
/* set MOD register to maximum for free running mode */
writel(0xffffffff, timer_base + TPM_MOD);
writel(GENMASK(counter_width - 1, 0), timer_base + TPM_MOD);
rate = clk_get_rate(per) >> 3;
ret = tpm_clocksource_init(rate);

View File

@ -0,0 +1,215 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014-2018 Nuvoton Technologies tomer.maimon@nuvoton.com
* All rights reserved.
*
* Copyright 2017 Google, Inc.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clockchips.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include "timer-of.h"
/* Timers registers */
#define NPCM7XX_REG_TCSR0 0x0 /* Timer 0 Control and Status Register */
#define NPCM7XX_REG_TICR0 0x8 /* Timer 0 Initial Count Register */
#define NPCM7XX_REG_TCSR1 0x4 /* Timer 1 Control and Status Register */
#define NPCM7XX_REG_TICR1 0xc /* Timer 1 Initial Count Register */
#define NPCM7XX_REG_TDR1 0x14 /* Timer 1 Data Register */
#define NPCM7XX_REG_TISR 0x18 /* Timer Interrupt Status Register */
/* Timers control */
#define NPCM7XX_Tx_RESETINT 0x1f
#define NPCM7XX_Tx_PERIOD BIT(27)
#define NPCM7XX_Tx_INTEN BIT(29)
#define NPCM7XX_Tx_COUNTEN BIT(30)
#define NPCM7XX_Tx_ONESHOT 0x0
#define NPCM7XX_Tx_OPER GENMASK(28, 27)	/* timer operating mode field */
#define NPCM7XX_Tx_MIN_PRESCALE 0x1
#define NPCM7XX_Tx_TDR_MASK_BITS 24
#define NPCM7XX_Tx_MAX_CNT 0xFFFFFF
#define NPCM7XX_T0_CLR_INT 0x1
#define NPCM7XX_Tx_CLR_CSR 0x0
/* Timers operating mode */
#define NPCM7XX_START_PERIODIC_Tx (NPCM7XX_Tx_PERIOD | NPCM7XX_Tx_COUNTEN | \
NPCM7XX_Tx_INTEN | \
NPCM7XX_Tx_MIN_PRESCALE)
#define NPCM7XX_START_ONESHOT_Tx (NPCM7XX_Tx_ONESHOT | NPCM7XX_Tx_COUNTEN | \
NPCM7XX_Tx_INTEN | \
NPCM7XX_Tx_MIN_PRESCALE)
#define NPCM7XX_START_Tx (NPCM7XX_Tx_COUNTEN | NPCM7XX_Tx_PERIOD | \
NPCM7XX_Tx_MIN_PRESCALE)
#define NPCM7XX_DEFAULT_CSR (NPCM7XX_Tx_CLR_CSR | NPCM7XX_Tx_MIN_PRESCALE)
static int npcm7xx_timer_resume(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
u32 val;
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val |= NPCM7XX_Tx_COUNTEN;
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
return 0;
}
static int npcm7xx_timer_shutdown(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
u32 val;
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val &= ~NPCM7XX_Tx_COUNTEN;
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
return 0;
}
static int npcm7xx_timer_oneshot(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
u32 val;
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val &= ~NPCM7XX_Tx_OPER;
val |= NPCM7XX_START_ONESHOT_Tx;
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
return 0;
}
static int npcm7xx_timer_periodic(struct clock_event_device *evt)
{
struct timer_of *to = to_timer_of(evt);
u32 val;
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val &= ~NPCM7XX_Tx_OPER;
writel(timer_of_period(to), timer_of_base(to) + NPCM7XX_REG_TICR0);
val |= NPCM7XX_START_PERIODIC_Tx;
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
return 0;
}
static int npcm7xx_clockevent_set_next_event(unsigned long evt,
struct clock_event_device *clk)
{
struct timer_of *to = to_timer_of(clk);
u32 val;
writel(evt, timer_of_base(to) + NPCM7XX_REG_TICR0);
val = readl(timer_of_base(to) + NPCM7XX_REG_TCSR0);
val |= NPCM7XX_START_Tx;
writel(val, timer_of_base(to) + NPCM7XX_REG_TCSR0);
return 0;
}
static irqreturn_t npcm7xx_timer0_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
struct timer_of *to = to_timer_of(evt);
writel(NPCM7XX_T0_CLR_INT, timer_of_base(to) + NPCM7XX_REG_TISR);
evt->event_handler(evt);
return IRQ_HANDLED;
}
static struct timer_of npcm7xx_to = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
.clkevt = {
.name = "npcm7xx-timer0",
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
.set_next_event = npcm7xx_clockevent_set_next_event,
.set_state_shutdown = npcm7xx_timer_shutdown,
.set_state_periodic = npcm7xx_timer_periodic,
.set_state_oneshot = npcm7xx_timer_oneshot,
.tick_resume = npcm7xx_timer_resume,
.rating = 300,
},
.of_irq = {
.handler = npcm7xx_timer0_interrupt,
.flags = IRQF_TIMER | IRQF_IRQPOLL,
},
};
static void __init npcm7xx_clockevents_init(void)
{
writel(NPCM7XX_DEFAULT_CSR,
timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR0);
writel(NPCM7XX_Tx_RESETINT,
timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TISR);
npcm7xx_to.clkevt.cpumask = cpumask_of(0);
clockevents_config_and_register(&npcm7xx_to.clkevt,
timer_of_rate(&npcm7xx_to),
0x1, NPCM7XX_Tx_MAX_CNT);
}
static void __init npcm7xx_clocksource_init(void)
{
u32 val;
writel(NPCM7XX_DEFAULT_CSR,
timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
writel(NPCM7XX_Tx_MAX_CNT,
timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TICR1);
val = readl(timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
val |= NPCM7XX_START_Tx;
writel(val, timer_of_base(&npcm7xx_to) + NPCM7XX_REG_TCSR1);
clocksource_mmio_init(timer_of_base(&npcm7xx_to) +
NPCM7XX_REG_TDR1,
"npcm7xx-timer1", timer_of_rate(&npcm7xx_to),
200, (unsigned int)NPCM7XX_Tx_TDR_MASK_BITS,
clocksource_mmio_readl_down);
}
static int __init npcm7xx_timer_init(struct device_node *np)
{
int ret;
ret = timer_of_init(np, &npcm7xx_to);
if (ret)
return ret;
/*
 * Clock input is divided by PRESCALE + 1 before it is fed
 * to the counter.
 */
npcm7xx_to.of_clk.rate = npcm7xx_to.of_clk.rate /
(NPCM7XX_Tx_MIN_PRESCALE + 1);
npcm7xx_clocksource_init();
npcm7xx_clockevents_init();
pr_info("Enabling NPCM7xx clocksource timer base: %px, IRQ: %d ",
timer_of_base(&npcm7xx_to), timer_of_irq(&npcm7xx_to));
return 0;
}
TIMER_OF_DECLARE(npcm7xx, "nuvoton,npcm750-timer", npcm7xx_timer_init);
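As a back-of-the-envelope check on the prescaler comment above, a standalone sketch of the resulting tick rate and the 24-bit clocksource wrap period (the 25 MHz APB clock is an assumed example, not taken from the driver):

#include <stdio.h>

#define NPCM7XX_Tx_MIN_PRESCALE 0x1
#define NPCM7XX_Tx_MAX_CNT      0xFFFFFF	/* 24-bit down counter */

int main(void)
{
	unsigned long apb = 25000000UL;	/* assumed example APB clock */
	unsigned long rate = apb / (NPCM7XX_Tx_MIN_PRESCALE + 1);
	double wrap_s = (double)(NPCM7XX_Tx_MAX_CNT + 1) / rate;

	printf("rate %lu Hz, clocksource wraps every %.3f s\n", rate, wrap_s);
	return 0;
}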

View File

@ -19,6 +19,7 @@
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include "dax-private.h"
#include "dax.h"
@ -540,6 +541,7 @@ static const struct file_operations dax_fops = {
.release = dax_release,
.get_unmapped_area = dax_get_unmapped_area,
.mmap = dax_mmap,
.mmap_supported_flags = MAP_SYNC,
};
static void dev_dax_release(struct device *dev)

View File

@ -138,13 +138,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
lut = (struct drm_color_lut *)blob->data;
lut_size = blob->length / sizeof(struct drm_color_lut);
if (__is_lut_linear(lut, lut_size)) {
/* Set to bypass if lut is set to linear */
stream->out_transfer_func->type = TF_TYPE_BYPASS;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
return 0;
}
gamma = dc_create_gamma();
if (!gamma)
return -ENOMEM;

View File

@ -4743,23 +4743,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
for (i=0; i < dep_table->count; i++) {
if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
break;
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
return;
}
}
if (i == dep_table->count)
if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
}
dep_table = table_info->vdd_dep_on_sclk;
odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
for (i=0; i < dep_table->count; i++) {
if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
break;
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
return;
}
}
if (i == dep_table->count)
if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
}
}
static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,

View File

@ -412,8 +412,10 @@ typedef struct {
QuadraticInt_t ReservedEquation2;
QuadraticInt_t ReservedEquation3;
uint16_t MinVoltageUlvGfx;
uint16_t MinVoltageUlvSoc;
uint32_t Reserved[15];
uint32_t Reserved[14];
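The Reserved[] shrink follows the usual rule for firmware-shared structs: new fields displace reserved words so the total size and layout stay fixed. A compile-time sketch of that invariant (hypothetical struct names; it assumes the two 16-bit fields together replace one 32-bit reserved word):

#include <assert.h>
#include <stdint.h>

struct if_old { uint32_t Reserved[15]; };
struct if_new {
	uint16_t MinVoltageUlvGfx;
	uint16_t MinVoltageUlvSoc;
	uint32_t Reserved[14];
};

static_assert(sizeof(struct if_old) == sizeof(struct if_new),
	      "firmware interface size must not change");

int main(void) { return 0; }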

View File

@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
{
uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
ssize_t ret;
int retry;
if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return 0;
ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmds_oen, sizeof(tmds_oen));
if (ret) {
DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
enable ? "enable" : "disable");
return ret;
/*
* LSPCON adapters in low-power state may ignore the first write, so
* read back and verify the written value a few times.
*/
for (retry = 0; retry < 3; retry++) {
uint8_t tmp;
ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmds_oen, sizeof(tmds_oen));
if (ret) {
DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
enable ? "enable" : "disable",
retry + 1);
return ret;
}
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmp, sizeof(tmp));
if (ret) {
DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
enable ? "enabling" : "disabling",
retry + 1);
return ret;
}
if (tmp == tmds_oen)
return 0;
}
return 0;
DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
enable ? "enabling" : "disabling");
return -EIO;
}
EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
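The retry logic above is a generic write-then-verify pattern. A self-contained sketch of the same shape (the dozing register is simulated; nothing here is the DRM helper itself):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t reg;
static int dropped_writes = 1;	/* emulate an adapter dozing through write #1 */

static void reg_write(uint8_t v)
{
	if (dropped_writes)
		dropped_writes--;	/* first write silently ignored */
	else
		reg = v;
}

static int write_verified(uint8_t val)
{
	for (int retry = 0; retry < 3; retry++) {
		reg_write(val);
		if (reg == val)
			return 0;	/* value stuck; done */
	}
	return -EIO;			/* never stuck after 3 attempts */
}

int main(void)
{
	reg = 0xff;
	printf("write_verified(0x00) = %d\n", write_verified(0x00));	/* 0 */
	return 0;
}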

View File

@ -18,6 +18,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <uapi/drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@ -26,20 +27,6 @@
#include "exynos_drm_iommu.h"
#include "exynos_drm_crtc.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
/*
* exynos specific framebuffer structure.
*
* @fb: drm framebuffer obejct.
* @exynos_gem: array of exynos specific gem object containing a gem object.
*/
struct exynos_drm_fb {
struct drm_framebuffer fb;
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
dma_addr_t dma_addr[MAX_FB_BUFFER];
};
static int check_fb_gem_memory_type(struct drm_device *drm_dev,
struct exynos_drm_gem *exynos_gem)
{
@ -66,40 +53,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
return 0;
}
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
unsigned int i;
drm_framebuffer_cleanup(fb);
for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) {
struct drm_gem_object *obj;
if (exynos_fb->exynos_gem[i] == NULL)
continue;
obj = &exynos_fb->exynos_gem[i]->base;
drm_gem_object_unreference_unlocked(obj);
}
kfree(exynos_fb);
exynos_fb = NULL;
}
static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
return drm_gem_handle_create(file_priv,
&exynos_fb->exynos_gem[0]->base, handle);
}
static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
.destroy = exynos_drm_fb_destroy,
.create_handle = exynos_drm_fb_create_handle,
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
};
struct drm_framebuffer *
@ -108,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct exynos_drm_gem **exynos_gem,
int count)
{
struct exynos_drm_fb *exynos_fb;
struct drm_framebuffer *fb;
int i;
int ret;
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb)
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (!fb)
return ERR_PTR(-ENOMEM);
for (i = 0; i < count; i++) {
@ -121,23 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
if (ret < 0)
goto err;
exynos_fb->exynos_gem[i] = exynos_gem[i];
exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr
+ mode_cmd->offsets[i];
fb->obj[i] = &exynos_gem[i]->base;
}
drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd);
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
if (ret < 0) {
DRM_ERROR("failed to initialize framebuffer\n");
goto err;
}
return &exynos_fb->fb;
return fb;
err:
kfree(exynos_fb);
kfree(fb);
return ERR_PTR(ret);
}
@ -191,12 +145,13 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
struct exynos_drm_gem *exynos_gem;
if (WARN_ON_ONCE(index >= MAX_FB_BUFFER))
return 0;
return exynos_fb->dma_addr[index];
exynos_gem = to_exynos_gem(fb->obj[index]);
return exynos_gem->dma_addr + fb->offsets[index];
}
static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = {
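After the refactor, a plane's DMA address is simply the backing GEM object's base address plus the framebuffer's per-plane offset, with no cached copy to keep in sync. A standalone sketch of that lookup (the struct names here are illustrative stand-ins, not the DRM types):

#include <inttypes.h>
#include <stdio.h>

#define MAX_FB_BUFFER 4

struct fake_gem { uint64_t dma_addr; };
struct fake_fb {
	struct fake_gem *obj[MAX_FB_BUFFER];
	uint32_t offsets[MAX_FB_BUFFER];
};

static uint64_t fb_dma_addr(const struct fake_fb *fb, int index)
{
	if (index < 0 || index >= MAX_FB_BUFFER)	/* mirrors the bound check */
		return 0;
	return fb->obj[index]->dma_addr + fb->offsets[index];
}

int main(void)
{
	struct fake_gem gem = { .dma_addr = 0x40000000 };
	struct fake_fb fb = { .obj = { &gem }, .offsets = { 0x1000 } };

	printf("plane 0 at 0x%" PRIx64 "\n", fb_dma_addr(&fb, 0));
	return 0;
}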

View File

@ -1080,6 +1080,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
s->workload->pending_events);
patch_value(s, cmd_ptr(s, 0), MI_NOOP);
return 0;
}

View File

@ -169,6 +169,8 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
@ -267,6 +269,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
if (IS_BROADWELL(dev_priv))
vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
/* Disable Primary/Sprite/Cursor plane */
for_each_pipe(dev_priv, pipe) {
vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE;
vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE;
}
vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
}

View File

@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
struct intel_vgpu_fb_info *fb_info)
{
gvt_dmabuf->drm_format = fb_info->drm_format;
gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
gvt_dmabuf->width = fb_info->width;
gvt_dmabuf->height = fb_info->height;
gvt_dmabuf->stride = fb_info->stride;

View File

@ -245,16 +245,13 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->hw_format = fmt;
plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
return -EINVAL;
}
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n",
plane->base);
return -EINVAL;
}
@ -371,16 +368,13 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
alpha_plane, alpha_force);
plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
return -EINVAL;
}
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n",
plane->base);
return -EINVAL;
}
@ -476,16 +470,13 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
plane->drm_format = drm_format;
plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
return -EINVAL;
}
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) {
gvt_vgpu_err("invalid gma address: %lx\n",
(unsigned long)plane->base);
gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n",
plane->base);
return -EINVAL;
}

View File

@ -530,6 +530,16 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
false, 0, mm->vgpu);
}
static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
struct intel_gvt_gtt_entry *entry, unsigned long index)
{
struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
}
static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
struct intel_gvt_gtt_entry *entry, unsigned long index)
{
@ -1818,6 +1828,18 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
return ret;
}
static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *entry)
{
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
unsigned long pfn;
pfn = pte_ops->get_pfn(entry);
if (pfn != vgpu->gvt->gtt.scratch_mfn)
intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
pfn << PAGE_SHIFT);
}
static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
@ -1844,10 +1866,10 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
m = e;
if (ops->test_present(&e)) {
gfn = ops->get_pfn(&e);
m = e;
/* one PTE update may be issued in multiple writes and the
* first write may not construct a valid gfn
@ -1868,8 +1890,12 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
} else
ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
} else
} else {
ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
ggtt_invalidate_pte(vgpu, &m);
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
ops->clear_present(&m);
}
out:
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
@ -2030,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
return PTR_ERR(gtt->ggtt_mm);
}
intel_vgpu_reset_ggtt(vgpu);
intel_vgpu_reset_ggtt(vgpu, false);
return create_scratch_page_tree(vgpu);
}
@ -2315,17 +2341,19 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
/**
* intel_vgpu_reset_ggtt - reset the GGTT entry
* @vgpu: a vGPU
* @invalidate_old: invalidate old entries
*
* This function is called at the vGPU create stage
* to reset all the GGTT entries.
*
*/
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
struct intel_gvt_gtt_entry old_entry;
u32 index;
u32 num_entries;
@ -2334,13 +2362,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
while (num_entries--)
while (num_entries--) {
if (invalidate_old) {
ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
ggtt_invalidate_pte(vgpu, &old_entry);
}
ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
}
index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
while (num_entries--)
while (num_entries--) {
if (invalidate_old) {
ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
ggtt_invalidate_pte(vgpu, &old_entry);
}
ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
}
ggtt_invalidate(dev_priv);
}
@ -2360,5 +2398,5 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
* removing the shadow pages.
*/
intel_vgpu_destroy_all_ppgtt_mm(vgpu);
intel_vgpu_reset_ggtt(vgpu);
intel_vgpu_reset_ggtt(vgpu, true);
}
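The invalidate_old flag distinguishes the two reset paths: at vGPU creation there are no stale guest mappings to drop, while at reset every old entry's DMA mapping must be torn down before the slot is pointed at the scratch page. A sketch of that loop shape (stub functions stand in for the GVT helpers):

#include <stdbool.h>
#include <stdio.h>

static void invalidate_pte(unsigned int idx) { printf("unmap old entry %u\n", idx); }
static void set_scratch(unsigned int idx)    { printf("entry %u -> scratch\n", idx); }

static void reset_range(unsigned int idx, unsigned int n, bool invalidate_old)
{
	while (n--) {
		if (invalidate_old)	/* reset path: drop stale guest mapping */
			invalidate_pte(idx);
		set_scratch(idx++);	/* create path: nothing to drop yet */
	}
}

int main(void)
{
	reset_range(0, 2, false);	/* intel_vgpu_init_gtt() style */
	reset_range(0, 2, true);	/* intel_vgpu_reset_gtt() style */
	return 0;
}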

View File

@ -193,7 +193,7 @@ struct intel_vgpu_gtt {
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old);
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);

View File

@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
switch (notification) {
case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
/* fall through */
case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
return PTR_ERR_OR_ZERO(mm);

View File

@ -1301,7 +1301,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
}
return 0;
return -ENOTTY;
}
static ssize_t

View File

@ -1105,30 +1105,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = i915_ggtt_probe_hw(dev_priv);
if (ret)
return ret;
goto err_perf;
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
/*
* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over.
*/
ret = i915_kick_out_firmware_fb(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
goto out_ggtt;
goto err_ggtt;
}
ret = i915_kick_out_vgacon(dev_priv);
if (ret) {
DRM_ERROR("failed to remove conflicting VGA console\n");
goto out_ggtt;
goto err_ggtt;
}
ret = i915_ggtt_init_hw(dev_priv);
if (ret)
return ret;
goto err_ggtt;
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
DRM_ERROR("failed to enable GGTT\n");
goto out_ggtt;
goto err_ggtt;
}
pci_set_master(pdev);
@ -1139,7 +1141,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
goto out_ggtt;
goto err_ggtt;
}
}
@ -1157,7 +1159,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
goto out_ggtt;
goto err_ggtt;
}
}
@ -1190,13 +1192,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
ret = intel_gvt_init(dev_priv);
if (ret)
goto out_ggtt;
goto err_ggtt;
return 0;
out_ggtt:
err_ggtt:
i915_ggtt_cleanup_hw(dev_priv);
err_perf:
i915_perf_fini(dev_priv);
return ret;
}

View File

@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
err = radix_tree_insert(handles_vma, handle, vma);
if (unlikely(err)) {
kfree(lut);
kmem_cache_free(eb->i915->luts, lut);
goto err_obj;
}

View File

@ -473,20 +473,37 @@ static u64 get_rc6(struct drm_i915_private *i915)
spin_lock_irqsave(&i915->pmu.lock, flags);
spin_lock(&kdev->power.lock);
if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
i915->pmu.suspended_jiffies_last =
kdev->power.suspended_jiffies;
/*
* Since intel_runtime_pm_get_if_in_use failed above to get the
* runtime PM reference, we cannot assume we are in runtime
* suspend: we may either a) race with coming out of it before
* we took power.lock, or b) be in a state other than suspended
* that can bring us here.
*
* We need to double-check that we are indeed currently runtime
* suspended and if not we cannot do better than report the last
* known RC6 value.
*/
if (kdev->power.runtime_status == RPM_SUSPENDED) {
if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
i915->pmu.suspended_jiffies_last =
kdev->power.suspended_jiffies;
val = kdev->power.suspended_jiffies -
i915->pmu.suspended_jiffies_last;
val += jiffies - kdev->power.accounting_timestamp;
val = kdev->power.suspended_jiffies -
i915->pmu.suspended_jiffies_last;
val += jiffies - kdev->power.accounting_timestamp;
val = jiffies_to_nsecs(val);
val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
} else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
} else {
val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
}
spin_unlock(&kdev->power.lock);
val = jiffies_to_nsecs(val);
val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
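The resulting control flow is a three-way fallback, sketched below as a standalone function (this mirrors the shape of the hunk, not the i915 types):

#include <stdio.h>
#include <stdint.h>

enum rpm_status { RPM_ACTIVE, RPM_SUSPENDED };

static uint64_t get_rc6_sketch(enum rpm_status st, uint64_t estimated,
			       uint64_t sampled, uint64_t suspended_ns)
{
	if (st == RPM_SUSPENDED)
		return sampled + suspended_ns;	/* extend by the time asleep */
	if (estimated)
		return estimated;	/* can't tell; report the last estimate */
	return sampled;			/* never estimated; last HW sample */
}

int main(void)
{
	/* while suspended: last sample extended by time spent asleep */
	printf("%llu\n", (unsigned long long)get_rc6_sketch(RPM_SUSPENDED, 0, 100, 25));
	/* racing out of suspend with a prior estimate: report that estimate */
	printf("%llu\n", (unsigned long long)get_rc6_sketch(RPM_ACTIVE, 140, 100, 0));
	return 0;
}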

View File

@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
u32 tmp;
if (!IS_GEN9_BC(dev_priv))
if (!IS_GEN9(dev_priv))
return;
i915_audio_component_get_power(kdev);

View File

@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
return;
aux_channel = child->aux_channel;
ddc_pin = child->ddc_pin;
is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@ -1303,9 +1302,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
if (is_dvi) {
info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin);
sanitize_ddc_pin(dev_priv, port);
ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin);
if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) {
info->alternate_ddc_pin = ddc_pin;
sanitize_ddc_pin(dev_priv, port);
} else {
DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
"sticking to defaults\n",
port_name(port), ddc_pin);
}
}
if (is_dp) {

View File

@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* know the next preemption status we see corresponds
* to this ELSP update.
*/
GEM_BUG_ON(!execlists_is_active(execlists,
EXECLISTS_ACTIVE_USER));
GEM_BUG_ON(!port_count(&port[0]));
if (port_count(&port[0]) > 1)
goto unlock;
@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
memset(port, 0, sizeof(*port));
port++;
}
execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
}
static void execlists_cancel_requests(struct intel_engine_cs *engine)
@ -1001,6 +1005,11 @@ static void execlists_submission_tasklet(unsigned long data)
if (fw)
intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
/* If the engine is now idle, so should be the flag; and vice versa. */
GEM_BUG_ON(execlists_is_active(&engine->execlists,
EXECLISTS_ACTIVE_USER) ==
!port_isset(engine->execlists.port));
}
static void queue_request(struct intel_engine_cs *engine,

View File

@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
vc4_bo_set_label(obj, -1);
if (bo->validated_shader) {
kfree(bo->validated_shader->uniform_addr_offsets);
kfree(bo->validated_shader->texture_samples);
kfree(bo->validated_shader);
bo->validated_shader = NULL;
@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
}
if (bo->validated_shader) {
kfree(bo->validated_shader->uniform_addr_offsets);
kfree(bo->validated_shader->texture_samples);
kfree(bo->validated_shader);
bo->validated_shader = NULL;

View File

@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
fail:
kfree(validation_state.branch_targets);
if (validated_shader) {
kfree(validated_shader->uniform_addr_offsets);
kfree(validated_shader->texture_samples);
kfree(validated_shader);
}

View File

@ -525,6 +525,9 @@
#define I2C_VENDOR_ID_HANTICK 0x0911
#define I2C_PRODUCT_ID_HANTICK_5288 0x5288
#define I2C_VENDOR_ID_RAYD 0x2386
#define I2C_PRODUCT_ID_RAYD_3118 0x3118
#define USB_VENDOR_ID_HANWANG 0x0b57
#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff

View File

@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CAPACITY:
if (dev->battery_report_type == HID_FEATURE_REPORT) {
if (dev->battery_status != HID_BATTERY_REPORTED &&
!dev->battery_avoid_query) {
value = hidinput_query_battery_capacity(dev);
if (value < 0)
return value;
@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_STATUS:
if (!dev->battery_reported &&
dev->battery_report_type == HID_FEATURE_REPORT) {
if (dev->battery_status != HID_BATTERY_REPORTED &&
!dev->battery_avoid_query) {
value = hidinput_query_battery_capacity(dev);
if (value < 0)
return value;
dev->battery_capacity = value;
dev->battery_reported = true;
dev->battery_status = HID_BATTERY_QUERIED;
}
if (!dev->battery_reported)
if (dev->battery_status == HID_BATTERY_UNKNOWN)
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
else if (dev->battery_capacity == 100)
val->intval = POWER_SUPPLY_STATUS_FULL;
@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
dev->battery_report_type = report_type;
dev->battery_report_id = field->report->id;
/*
* Stylus is normally not connected to the device and thus we
* can't query the device and get meaningful battery strength.
* We have to wait for the device to report it on its own.
*/
dev->battery_avoid_query = report_type == HID_INPUT_REPORT &&
field->physical == HID_DG_STYLUS;
dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
if (IS_ERR(dev->battery)) {
error = PTR_ERR(dev->battery);
@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
capacity = hidinput_scale_battery_capacity(dev, value);
if (!dev->battery_reported || capacity != dev->battery_capacity) {
if (dev->battery_status != HID_BATTERY_REPORTED ||
capacity != dev->battery_capacity) {
dev->battery_capacity = capacity;
dev->battery_reported = true;
dev->battery_status = HID_BATTERY_REPORTED;
power_supply_changed(dev->battery);
}
}
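The bool battery_reported becomes a tri-state so that a query result (QUERIED) no longer masquerades as a device report (REPORTED). The guard used in both property handlers reduces to a small predicate, sketched here with the same enum values:

#include <stdbool.h>
#include <stdio.h>

enum battery_status { HID_BATTERY_UNKNOWN, HID_BATTERY_QUERIED, HID_BATTERY_REPORTED };

static bool may_query(enum battery_status st, bool avoid_query)
{
	/* query only while the device never reported on its own */
	return st != HID_BATTERY_REPORTED && !avoid_query;
}

int main(void)
{
	printf("unknown, querying allowed: %d\n", may_query(HID_BATTERY_UNKNOWN, false));  /* 1 */
	printf("stylus (avoid_query):      %d\n", may_query(HID_BATTERY_UNKNOWN, true));   /* 0 */
	printf("already reported:          %d\n", may_query(HID_BATTERY_REPORTED, false)); /* 0 */
	return 0;
}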

View File

@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
int ret = 0, len;
unsigned char report_number;
if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
ret = -ENODEV;
goto out;
}
dev = hidraw_table[minor]->hid;
if (!dev->ll_driver->raw_request) {

View File

@ -47,6 +47,7 @@
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
/* flags */
#define I2C_HID_STARTED 0
@ -171,6 +172,8 @@ static const struct i2c_hid_quirks {
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
{ I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
I2C_HID_QUIRK_RESEND_REPORT_DESCR },
{ 0, 0 }
};
@ -1220,6 +1223,16 @@ static int i2c_hid_resume(struct device *dev)
if (ret)
return ret;
/* The RAYDIUM device (2386:3118) needs to re-send the report descriptor
* command after resume; after that it is back to normal. Otherwise it
* issues too many incomplete reports.
*/
if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
if (ret)
return ret;
}
if (hid->driver && hid->driver->reset_resume) {
ret = hid->driver->reset_resume(hid);
return ret;
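The quirk table pairs a VID/PID with a bitmask that is consulted on the resume path. A minimal standalone version of that lookup pattern (the constants are copied from the hunk; the helper name is made up):

#include <stdint.h>
#include <stdio.h>

#define QUIRK_RESEND_REPORT_DESCR (1u << 2)

static const struct { uint16_t vid, pid; uint32_t quirks; } quirk_table[] = {
	{ 0x2386, 0x3118, QUIRK_RESEND_REPORT_DESCR },	/* RAYD 3118 */
	{ 0, 0, 0 }					/* terminator */
};

static uint32_t lookup_quirks(uint16_t vid, uint16_t pid)
{
	for (int i = 0; quirk_table[i].vid; i++)
		if (quirk_table[i].vid == vid && quirk_table[i].pid == pid)
			return quirk_table[i].quirks;
	return 0;
}

int main(void)
{
	printf("quirks: 0x%x\n", lookup_quirks(0x2386, 0x3118));
	return 0;
}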

View File

@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id)
return tool_type;
}
static void wacom_exit_report(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->pen_input;
struct wacom_features *features = &wacom->features;
unsigned char *data = wacom->data;
int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
/*
* Reset all states otherwise we lose the initial states
* when in-prox next time
*/
input_report_abs(input, ABS_X, 0);
input_report_abs(input, ABS_Y, 0);
input_report_abs(input, ABS_DISTANCE, 0);
input_report_abs(input, ABS_TILT_X, 0);
input_report_abs(input, ABS_TILT_Y, 0);
if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
input_report_key(input, BTN_LEFT, 0);
input_report_key(input, BTN_MIDDLE, 0);
input_report_key(input, BTN_RIGHT, 0);
input_report_key(input, BTN_SIDE, 0);
input_report_key(input, BTN_EXTRA, 0);
input_report_abs(input, ABS_THROTTLE, 0);
input_report_abs(input, ABS_RZ, 0);
} else {
input_report_abs(input, ABS_PRESSURE, 0);
input_report_key(input, BTN_STYLUS, 0);
input_report_key(input, BTN_STYLUS2, 0);
input_report_key(input, BTN_TOUCH, 0);
input_report_abs(input, ABS_WHEEL, 0);
if (features->type >= INTUOS3S)
input_report_abs(input, ABS_Z, 0);
}
input_report_key(input, wacom->tool[idx], 0);
input_report_abs(input, ABS_MISC, 0); /* reset tool id */
input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
wacom->id[idx] = 0;
}
static int wacom_intuos_inout(struct wacom_wac *wacom)
{
struct wacom_features *features = &wacom->features;
@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
if (!wacom->id[idx])
return 1;
/*
* Reset all states otherwise we lose the initial states
* when in-prox next time
*/
input_report_abs(input, ABS_X, 0);
input_report_abs(input, ABS_Y, 0);
input_report_abs(input, ABS_DISTANCE, 0);
input_report_abs(input, ABS_TILT_X, 0);
input_report_abs(input, ABS_TILT_Y, 0);
if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
input_report_key(input, BTN_LEFT, 0);
input_report_key(input, BTN_MIDDLE, 0);
input_report_key(input, BTN_RIGHT, 0);
input_report_key(input, BTN_SIDE, 0);
input_report_key(input, BTN_EXTRA, 0);
input_report_abs(input, ABS_THROTTLE, 0);
input_report_abs(input, ABS_RZ, 0);
} else {
input_report_abs(input, ABS_PRESSURE, 0);
input_report_key(input, BTN_STYLUS, 0);
input_report_key(input, BTN_STYLUS2, 0);
input_report_key(input, BTN_TOUCH, 0);
input_report_abs(input, ABS_WHEEL, 0);
if (features->type >= INTUOS3S)
input_report_abs(input, ABS_Z, 0);
}
input_report_key(input, wacom->tool[idx], 0);
input_report_abs(input, ABS_MISC, 0); /* reset tool id */
input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
wacom->id[idx] = 0;
wacom_exit_report(wacom);
return 2;
}
@ -1235,6 +1245,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
if (!valid)
continue;
if (!prox) {
wacom->shared->stylus_in_proximity = false;
wacom_exit_report(wacom);
input_sync(pen_input);
return;
}
if (range) {
input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));

View File

@ -68,12 +68,12 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
goto _do;
{
char _dup[len + 1];
char *dup, *tok, *name, *val;
int tmp;
strcpy(_dup, arg);
dup = _dup;
dup = kstrdup(arg, GFP_ATOMIC);
if (!dup)
return;
while ((tok = strsep(&dup, ","))) {
if (!strlen(tok))
@ -89,6 +89,8 @@ void dsp_hwec_enable(struct dsp *dsp, const char *arg)
deftaps = tmp;
}
}
kfree(dup);
}
_do:
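The hwec argument parser splits a comma-separated list of name=value options with strsep(), which modifies the string in place; hence the heap copy that replaces the on-stack VLA. A userspace equivalent (strdup/strsep are the POSIX/glibc counterparts, standing in for kstrdup and the kernel's strsep; the option string is a made-up example):

#define _DEFAULT_SOURCE		/* for strsep()/strdup() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *dup = strdup("deftaps=128,dtmf");	/* stand-in for the arg string */
	char *p = dup, *tok;

	if (!dup)
		return 1;
	while ((tok = strsep(&p, ","))) {
		char *val = tok;
		char *name = strsep(&val, "=");	/* split "name=value" in place */

		if (strlen(name))
			printf("option '%s' = '%s'\n", name, val ? val : "");
	}
	free(dup);
	return 0;
}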

Some files were not shown because too many files have changed in this diff Show More