Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts.

Build issue in drivers/net/ethernet/sfc/ptp.c
  54fccfdd7c ("sfc: efx_default_channel_type APIs can be static")
  49e6123c65 ("net: sfc: fix memory leak due to ptp channel")
https://lore.kernel.org/all/20220510130556.52598fe2@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Jakub Kicinski <kuba@kernel.org>
Date:   2022-05-12 15:39:02 -07:00
Commit: 9b19e57a3c
196 files changed, 1842 insertions(+), 787 deletions(-)


@ -45,6 +45,7 @@ Andrey Konovalov <andreyknvl@gmail.com> <andreyknvl@google.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
Andrzej Hajda <andrzej.hajda@intel.com> <a.hajda@samsung.com>
André Almeida <andrealmeid@igalia.com> <andrealmeid@collabora.com>
Andy Adamson <andros@citi.umich.edu>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com>
@ -204,6 +205,7 @@ Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
Kalle Valo <kvalo@kernel.org> <kvalo@codeaurora.org>
Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
Kay Sievers <kay.sievers@vrfy.org>
Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>


@ -55,8 +55,6 @@ allOf:
then:
properties:
clocks:
minItems: 7
maxItems: 7
items:
- description: 32k osc
- description: 25m osc
@ -66,8 +64,6 @@ allOf:
- description: ext3 clock input
- description: ext4 clock input
clock-names:
minItems: 7
maxItems: 7
items:
- const: ckil
- const: osc_25m


@ -95,7 +95,6 @@ then:
properties:
clocks:
minItems: 1
maxItems: 4
items:
- description: Functional clock
- description: EXTAL input clock
@ -104,7 +103,6 @@ then:
clock-names:
minItems: 1
maxItems: 4
items:
- const: fck
# The LVDS encoder can use the EXTAL or DU_DOTCLKINx clocks.
@ -128,12 +126,10 @@ then:
else:
properties:
clocks:
maxItems: 1
items:
- description: Functional clock
clock-names:
maxItems: 1
items:
- const: fck


@ -109,7 +109,6 @@ allOf:
properties:
clocks:
minItems: 1
maxItems: 3
items:
- description: Functional clock
- description: DU_DOTCLKIN0 input clock
@ -117,7 +116,6 @@ allOf:
clock-names:
minItems: 1
maxItems: 3
items:
- const: du.0
- pattern: '^dclkin\.[01]$'
@ -159,7 +157,6 @@ allOf:
properties:
clocks:
minItems: 2
maxItems: 4
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@ -168,7 +165,6 @@ allOf:
clock-names:
minItems: 2
maxItems: 4
items:
- const: du.0
- const: du.1
@ -216,7 +212,6 @@ allOf:
properties:
clocks:
minItems: 2
maxItems: 4
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@ -225,7 +220,6 @@ allOf:
clock-names:
minItems: 2
maxItems: 4
items:
- const: du.0
- const: du.1
@ -271,7 +265,6 @@ allOf:
properties:
clocks:
minItems: 2
maxItems: 4
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@ -280,7 +273,6 @@ allOf:
clock-names:
minItems: 2
maxItems: 4
items:
- const: du.0
- const: du.1
@ -327,7 +319,6 @@ allOf:
properties:
clocks:
minItems: 2
maxItems: 4
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@ -336,7 +327,6 @@ allOf:
clock-names:
minItems: 2
maxItems: 4
items:
- const: du.0
- const: du.1
@ -386,7 +376,6 @@ allOf:
properties:
clocks:
minItems: 3
maxItems: 6
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@ -397,7 +386,6 @@ allOf:
clock-names:
minItems: 3
maxItems: 6
items:
- const: du.0
- const: du.1
@ -448,7 +436,6 @@ allOf:
properties:
clocks:
minItems: 4
maxItems: 8
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@ -461,7 +448,6 @@ allOf:
clock-names:
minItems: 4
maxItems: 8
items:
- const: du.0
- const: du.1
@ -525,7 +511,6 @@ allOf:
properties:
clocks:
minItems: 3
maxItems: 6
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@ -536,7 +521,6 @@ allOf:
clock-names:
minItems: 3
maxItems: 6
items:
- const: du.0
- const: du.1
@ -596,7 +580,6 @@ allOf:
properties:
clocks:
minItems: 3
maxItems: 6
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@ -607,7 +590,6 @@ allOf:
clock-names:
minItems: 3
maxItems: 6
items:
- const: du.0
- const: du.1
@ -666,14 +648,12 @@ allOf:
properties:
clocks:
minItems: 1
maxItems: 2
items:
- description: Functional clock for DU0
- description: DU_DOTCLKIN0 input clock
clock-names:
minItems: 1
maxItems: 2
items:
- const: du.0
- const: dclkin.0
@ -723,7 +703,6 @@ allOf:
properties:
clocks:
minItems: 2
maxItems: 4
items:
- description: Functional clock for DU0
- description: Functional clock for DU1
@ -732,7 +711,6 @@ allOf:
clock-names:
minItems: 2
maxItems: 4
items:
- const: du.0
- const: du.1
@ -791,7 +769,6 @@ allOf:
- description: Functional clock
clock-names:
maxItems: 1
items:
- const: du.0


@ -138,7 +138,6 @@ allOf:
- const: bus
- const: adc
minItems: 1
maxItems: 2
interrupts:
items:
@ -170,7 +169,6 @@ allOf:
- const: bus
- const: adc
minItems: 1
maxItems: 2
interrupts:
items:


@ -43,8 +43,6 @@ patternProperties:
- 4 # LED output FLASH1
- 5 # LED output FLASH2
unevaluatedProperties: false
required:
- compatible
- "#address-cells"


@ -202,22 +202,17 @@ allOf:
clocks:
items:
- description: module clock
minItems: 1
maxItems: 1
else:
properties:
clocks:
items:
- description: module clock
- description: timeout clock
minItems: 2
maxItems: 2
clock-names:
items:
- const: sdhci
- const: tmclk
minItems: 2
maxItems: 2
required:
- clock-names


@ -147,8 +147,6 @@ allOf:
- description: SoC gpmi io clock
- description: SoC gpmi bch apb clock
clock-names:
minItems: 2
maxItems: 2
items:
- const: gpmi_io
- const: gpmi_bch_apb

View File

@ -80,8 +80,6 @@ if:
then:
properties:
interrupts:
minItems: 4
maxItems: 4
items:
- description: Error and status IRQ
- description: Message object IRQ
@ -91,7 +89,6 @@ then:
else:
properties:
interrupts:
maxItems: 1
items:
- description: Error and status IRQ


@ -142,7 +142,6 @@ examples:
device_type = "pci";
reg = <0x0 0x0 0x0 0x0 0x0>;
reset-gpios = <&pinctrl_ap 152 0>;
max-link-speed = <2>;
#address-cells = <3>;
#size-cells = <2>;
@ -153,7 +152,6 @@ examples:
device_type = "pci";
reg = <0x800 0x0 0x0 0x0 0x0>;
reset-gpios = <&pinctrl_ap 153 0>;
max-link-speed = <2>;
#address-cells = <3>;
#size-cells = <2>;
@ -164,7 +162,6 @@ examples:
device_type = "pci";
reg = <0x1000 0x0 0x0 0x0 0x0>;
reset-gpios = <&pinctrl_ap 33 0>;
max-link-speed = <1>;
#address-cells = <3>;
#size-cells = <2>;


@ -102,19 +102,17 @@ if:
then:
properties:
reg:
maxItems: 2
minItems: 2
reg-names:
items:
- const: "phy"
- const: "phy-ctrl"
minItems: 2
else:
properties:
reg:
maxItems: 1
reg-names:
maxItems: 1
items:
- const: "phy"
required:
- compatible


@ -52,11 +52,19 @@ properties:
hardware supporting it the pull strength in Ohm.
drive-push-pull:
type: boolean
oneOf:
- type: boolean
- $ref: /schemas/types.yaml#/definitions/uint32
enum: [ 0, 1 ]
deprecated: true
description: drive actively high and low
drive-open-drain:
type: boolean
oneOf:
- type: boolean
- $ref: /schemas/types.yaml#/definitions/uint32
const: 1 # No known cases of 0
deprecated: true
description: drive with open drain
drive-open-source:


@ -71,7 +71,6 @@ allOf:
then:
properties:
clock-output-names:
minItems: 1
maxItems: 1
- if:
@ -102,7 +101,6 @@ allOf:
properties:
clock-output-names:
minItems: 3
maxItems: 3
- if:
properties:
@ -113,16 +111,12 @@ allOf:
then:
properties:
clocks:
minItems: 3
maxItems: 3
items:
- description: Bus clock for register access
- description: 24 MHz oscillator
- description: 32 kHz clock from the CCU
clock-names:
minItems: 3
maxItems: 3
items:
- const: bus
- const: hosc
@ -142,7 +136,6 @@ allOf:
properties:
clocks:
minItems: 3
maxItems: 4
items:
- description: Bus clock for register access
- description: 24 MHz oscillator
@ -151,7 +144,6 @@ allOf:
clock-names:
minItems: 3
maxItems: 4
items:
- const: bus
- const: hosc
@ -174,14 +166,12 @@ allOf:
then:
properties:
interrupts:
minItems: 1
maxItems: 1
else:
properties:
interrupts:
minItems: 2
maxItems: 2
required:
- "#clock-cells"


@ -100,7 +100,6 @@ allOf:
maxItems: 3
clock-names:
minItems: 2
maxItems: 3
items:
- const: uart
- pattern: '^clk_uart_baud[0-1]$'
@ -118,11 +117,8 @@ allOf:
then:
properties:
clocks:
minItems: 2
maxItems: 2
clock-names:
minItems: 2
maxItems: 2
items:
- const: uart
- const: clk_uart_baud0


@ -89,7 +89,6 @@ allOf:
properties:
dmas:
minItems: 1
maxItems: 2
items:
- description: RX DMA Channel
- description: TX DMA Channel


@ -80,7 +80,6 @@ allOf:
then:
properties:
clocks:
minItems: 6
items:
- description: AUXCLK clock for McASP used by CPB audio
- description: Parent for CPB_McASP auxclk (for 48KHz)
@ -107,7 +106,6 @@ allOf:
then:
properties:
clocks:
maxItems: 4
items:
- description: AUXCLK clock for McASP used by CPB audio
- description: Parent for CPB_McASP auxclk (for 48KHz)


@ -67,7 +67,6 @@ then:
properties:
reg:
minItems: 2
maxItems: 3
items:
- description: TSC1 registers
- description: TSC2 registers


@ -43,6 +43,9 @@ properties:
- const: phy_clk
- const: ref_clk
power-domains:
maxItems: 1
reg:
maxItems: 1


@ -3571,8 +3571,9 @@ M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
S: Supported
W: http://sourceforge.net/projects/bonding/
F: Documentation/networking/bonding.rst
F: drivers/net/bonding/
F: include/net/bonding.h
F: include/net/bond*
F: include/uapi/linux/if_bonding.h
BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
@ -5919,7 +5920,7 @@ R: Benjamin Gaignard <benjamin.gaignard@collabora.com>
R: Liam Mark <lmark@codeaurora.org>
R: Laura Abbott <labbott@redhat.com>
R: Brian Starkey <Brian.Starkey@arm.com>
R: John Stultz <john.stultz@linaro.org>
R: John Stultz <jstultz@google.com>
L: linux-media@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
@ -6589,7 +6590,7 @@ F: drivers/gpu/drm/gma500/
DRM DRIVERS FOR HISILICON
M: Xinliang Liu <xinliang.liu@linaro.org>
M: Tian Tao <tiantao6@hisilicon.com>
R: John Stultz <john.stultz@linaro.org>
R: John Stultz <jstultz@google.com>
R: Xinwei Kong <kong.kongxinwei@hisilicon.com>
R: Chen Feng <puck.chen@hisilicon.com>
L: dri-devel@lists.freedesktop.org
@ -7501,7 +7502,7 @@ F: Documentation/hwmon/f71805f.rst
F: drivers/hwmon/f71805f.c
FADDR2LINE
M: Josh Poimboeuf <jpoimboe@redhat.com>
M: Josh Poimboeuf <jpoimboe@kernel.org>
S: Maintained
F: scripts/faddr2line
@ -8114,7 +8115,7 @@ M: Ingo Molnar <mingo@redhat.com>
R: Peter Zijlstra <peterz@infradead.org>
R: Darren Hart <dvhart@infradead.org>
R: Davidlohr Bueso <dave@stgolabs.net>
R: André Almeida <andrealmeid@collabora.com>
R: André Almeida <andrealmeid@igalia.com>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
@ -8387,7 +8388,7 @@ M: Linus Walleij <linus.walleij@linaro.org>
M: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-gpio@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
F: Documentation/ABI/obsolete/sysfs-gpio
F: Documentation/ABI/testing/gpio-cdev
F: Documentation/admin-guide/gpio/
@ -8849,7 +8850,7 @@ F: Documentation/devicetree/bindings/net/hisilicon*.txt
F: drivers/net/ethernet/hisilicon/
HIKEY960 ONBOARD USB GPIO HUB DRIVER
M: John Stultz <john.stultz@linaro.org>
M: John Stultz <jstultz@google.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/misc/hisi_hikey_usb.c
@ -10132,7 +10133,7 @@ S: Supported
F: drivers/net/wireless/intel/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi)
M: Luca Coelho <luciano.coelho@intel.com>
M: Gregory Greenman <gregory.greenman@intel.com>
L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
@ -11349,7 +11350,7 @@ F: drivers/mmc/host/litex_mmc.c
N: litex
LIVE PATCHING
M: Josh Poimboeuf <jpoimboe@redhat.com>
M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Jiri Kosina <jikos@kernel.org>
M: Miroslav Benes <mbenes@suse.cz>
M: Petr Mladek <pmladek@suse.com>
@ -14250,7 +14251,7 @@ F: lib/objagg.c
F: lib/test_objagg.c
OBJTOOL
M: Josh Poimboeuf <jpoimboe@redhat.com>
M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Peter Zijlstra <peterz@infradead.org>
S: Supported
F: tools/objtool/
@ -18824,7 +18825,7 @@ F: include/dt-bindings/reset/starfive-jh7100.h
STATIC BRANCH/CALL
M: Peter Zijlstra <peterz@infradead.org>
M: Josh Poimboeuf <jpoimboe@redhat.com>
M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Jason Baron <jbaron@akamai.com>
R: Steven Rostedt <rostedt@goodmis.org>
R: Ard Biesheuvel <ardb@kernel.org>
@ -19833,7 +19834,7 @@ F: drivers/net/wireless/ti/
F: include/linux/wl12xx.h
TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
M: John Stultz <john.stultz@linaro.org>
M: John Stultz <jstultz@google.com>
M: Thomas Gleixner <tglx@linutronix.de>
R: Stephen Boyd <sboyd@kernel.org>
L: linux-kernel@vger.kernel.org
@ -21484,7 +21485,7 @@ F: arch/x86/kernel/apic/x2apic_uv_x.c
F: arch/x86/platform/uv/
X86 STACK UNWINDING
M: Josh Poimboeuf <jpoimboe@redhat.com>
M: Josh Poimboeuf <jpoimboe@kernel.org>
M: Peter Zijlstra <peterz@infradead.org>
S: Supported
F: arch/x86/include/asm/unwind*.h


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Superb Owl
# *DOCUMENTATION*


@ -40,9 +40,9 @@
typedef unsigned int cycles_t;
/*
* On R4000/R4400 before version 5.0 an erratum exists such that if the
* cycle counter is read in the exact moment that it is matching the
* compare register, no interrupt will be generated.
* On R4000/R4400 an erratum exists such that if the cycle counter is
* read in the exact moment that it is matching the compare register,
* no interrupt will be generated.
*
* There is a suggested workaround and also the erratum can't strike if
* the compare interrupt isn't being used as the clock source device.
@ -63,7 +63,7 @@ static inline int can_use_mips_counter(unsigned int prid)
if (!__builtin_constant_p(cpu_has_counter))
asm volatile("" : "=m" (cpu_data[0].options));
if (likely(cpu_has_counter &&
prid >= (PRID_IMP_R4000 | PRID_REV_ENCODE_44(5, 0))))
prid > (PRID_IMP_R4000 | PRID_REV_ENCODE_44(15, 15))))
return 1;
else
return 0;


@ -141,15 +141,10 @@ static __init int cpu_has_mfc0_count_bug(void)
case CPU_R4400MC:
/*
* The published errata for the R4400 up to 3.0 say the CPU
* has the mfc0 from count bug.
* has the mfc0 from count bug. This seems the last version
* produced.
*/
if ((current_cpu_data.processor_id & 0xff) <= 0x30)
return 1;
/*
* we assume newer revisions are ok
*/
return 0;
return 1;
}
return 0;


@ -38,6 +38,7 @@ config PARISC
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
select GENERIC_ARCH_TOPOLOGY if SMP
select GENERIC_CPU_DEVICES if !SMP
select GENERIC_LIB_DEVMEM_IS_ALLOWED
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE


@ -6,6 +6,9 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_CGROUPS=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_PERF_EVENTS=y
@ -47,7 +50,6 @@ CONFIG_PARPORT=y
CONFIG_PARPORT_PC=m
CONFIG_PARPORT_1284=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=6144
CONFIG_BLK_DEV_SD=y


@ -16,6 +16,7 @@ CONFIG_CGROUPS=y
CONFIG_MEMCG=y
CONFIG_CGROUP_PIDS=y
CONFIG_CPUSETS=y
CONFIG_USER_NS=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@ -267,9 +268,9 @@ CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRC_CCITT=m
CONFIG_LIBCRC32C=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_KERNEL=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_SCHED_DEBUG is not set


@ -160,7 +160,7 @@ extern void __update_cache(pte_t pte);
#define SPACEID_SHIFT (MAX_ADDRBITS - 32)
#else
#define MAX_ADDRBITS (BITS_PER_LONG)
#define MAX_ADDRESS (1UL << MAX_ADDRBITS)
#define MAX_ADDRESS (1ULL << MAX_ADDRBITS)
#define SPACEID_SHIFT 0
#endif


@ -403,7 +403,7 @@ void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
unsigned long size;
unsigned long threshold, threshold2;
unsigned long threshold;
alltime = mfctl(16);
flush_data_cache();
@ -418,20 +418,8 @@ void __init parisc_setup_cache_timing(void)
alltime, size, rangetime);
threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
/*
* The threshold computed above isn't very reliable since the
* flush times depend greatly on the percentage of dirty lines
* in the flush range. Further, the whole cache time doesn't
* include the time to refill lines that aren't in the mm/vma
* being flushed. By timing glibc build and checks on mako cpus,
* the following formula seems to work reasonably well. The
* value from the timing calculation is too small, and increases
* build and check times by almost a factor two.
*/
threshold2 = cache_info.dc_size * num_online_cpus();
if (threshold2 > threshold)
threshold = threshold2;
if (threshold > cache_info.dc_size)
threshold = cache_info.dc_size;
if (threshold)
parisc_cache_flush_threshold = threshold;
printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",


@ -152,7 +152,7 @@ int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs)
/* for absolute branch instructions we can copy iaoq_b. for relative
* branch instructions we need to calculate the new address based on the
* difference between iaoq_f and iaoq_b. We cannot use iaoq_b without
* modificationt because it's based on our ainsn.insn address.
* modifications because it's based on our ainsn.insn address.
*/
if (p->post_handler)


@ -40,7 +40,10 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,
*need_unmap = 1;
set_fixmap(fixmap, page_to_phys(page));
raw_spin_lock_irqsave(&patch_lock, *flags);
if (flags)
raw_spin_lock_irqsave(&patch_lock, *flags);
else
__acquire(&patch_lock);
return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}
@ -49,7 +52,10 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
{
clear_fixmap(fixmap);
raw_spin_unlock_irqrestore(&patch_lock, *flags);
if (flags)
raw_spin_unlock_irqrestore(&patch_lock, *flags);
else
__release(&patch_lock);
}
void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
@ -61,9 +67,8 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
int mapped;
/* Make sure we don't have any aliases in cache */
flush_kernel_dcache_range_asm(start, end);
flush_kernel_icache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
flush_kernel_vmap_range(addr, len);
flush_icache_range(start, end);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
@ -76,10 +81,8 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
* We're crossing a page boundary, so
* need to remap
*/
flush_kernel_dcache_range_asm((unsigned long)fixmap,
(unsigned long)p);
flush_tlb_kernel_range((unsigned long)fixmap,
(unsigned long)p);
flush_kernel_vmap_range((void *)fixmap,
(p-fixmap) * sizeof(*p));
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
@ -87,10 +90,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
}
}
flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
flush_icache_range(start, end);
}
void __kprobes __patch_text(void *addr, u32 insn)


@ -171,6 +171,7 @@ static int __init processor_probe(struct parisc_device *dev)
p->cpu_num = cpu_info.cpu_num;
p->cpu_loc = cpu_info.cpu_loc;
set_cpu_possible(cpuid, true);
store_cpu_topology(cpuid);
#ifdef CONFIG_SMP
@ -419,8 +420,7 @@ show_cpuinfo (struct seq_file *m, void *v)
}
seq_printf(m, " (0x%02lx)\n", boot_cpu_data.pdc.capabilities);
seq_printf(m, "model\t\t: %s\n"
"model name\t: %s\n",
seq_printf(m, "model\t\t: %s - %s\n",
boot_cpu_data.pdc.sys_model_name,
cpuinfo->dev ?
cpuinfo->dev->name : "Unknown");
@ -461,6 +461,13 @@ static struct parisc_driver cpu_driver __refdata = {
*/
void __init processor_init(void)
{
unsigned int cpu;
reset_cpu_topology();
/* reset possible mask. We will mark those which are possible. */
for_each_possible_cpu(cpu)
set_cpu_possible(cpu, false);
register_parisc_driver(&cpu_driver);
}


@ -161,6 +161,8 @@ void __init setup_arch(char **cmdline_p)
#ifdef CONFIG_PA11
dma_ops_init();
#endif
clear_sched_clock_stable();
}
/*


@ -251,13 +251,9 @@ void __init time_init(void)
static int __init init_cr16_clocksource(void)
{
/*
* The cr16 interval timers are not syncronized across CPUs, even if
* they share the same socket.
* The cr16 interval timers are not synchronized across CPUs.
*/
if (num_online_cpus() > 1 && !running_on_qemu) {
/* mark sched_clock unstable */
clear_sched_clock_stable();
clocksource_cr16.name = "cr16_unstable";
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;


@ -469,7 +469,7 @@ void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long o
* panic notifiers, and we should call panic
* directly from the location that we wish.
* e.g. We should not call panic from
* parisc_terminate, but rather the oter way around.
* parisc_terminate, but rather the other way around.
* This hack works, prints the panic message twice,
* and it enables reboot timers!
*/


@ -253,7 +253,7 @@ dbl_fadd(
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
* with denomalized numbers. */
* with denormalized numbers. */
}
else
{


@ -256,7 +256,7 @@ dbl_fsub(
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
* with denomalized numbers. */
* with denormalized numbers. */
}
else
{


@ -249,7 +249,7 @@ sgl_fadd(
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
* with denomalized numbers. */
* with denormalized numbers. */
}
else
{


@ -252,7 +252,7 @@ sgl_fsub(
return(NOEXCEPTION);
}
right_exponent = 1; /* Set exponent to reflect different bias
* with denomalized numbers. */
* with denormalized numbers. */
}
else
{


@ -22,12 +22,15 @@
.macro cvdso_call funct call_time=0
.cfi_startproc
PPC_STLU r1, -PPC_MIN_STKFRM(r1)
.cfi_adjust_cfa_offset PPC_MIN_STKFRM
mflr r0
.cfi_register lr, r0
PPC_STLU r1, -PPC_MIN_STKFRM(r1)
.cfi_adjust_cfa_offset PPC_MIN_STKFRM
PPC_STL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
.cfi_rel_offset lr, PPC_MIN_STKFRM + PPC_LR_STKOFF
#ifdef __powerpc64__
PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1)
.cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT
#endif
get_datapage r5
.ifeq \call_time
@ -39,13 +42,15 @@
PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
#ifdef __powerpc64__
PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)
.cfi_restore r2
#endif
.ifeq \call_time
cmpwi r3, 0
.endif
mtlr r0
.cfi_restore lr
addi r1, r1, 2 * PPC_MIN_STKFRM
.cfi_restore lr
.cfi_def_cfa_offset 0
crclr so
.ifeq \call_time
beqlr+


@ -462,7 +462,6 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu
{
struct papr_scm_perf_stat *stat;
struct papr_scm_perf_stats *stats;
char *statid;
int index, rc, count;
u32 available_events;
@ -493,14 +492,12 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu
for (index = 0, stat = stats->scm_statistic, count = 0;
index < available_events; index++, ++stat) {
statid = kzalloc(strlen(stat->stat_id) + 1, GFP_KERNEL);
if (!statid) {
p->nvdimm_events_map[count] = kmemdup_nul(stat->stat_id, 8, GFP_KERNEL);
if (!p->nvdimm_events_map[count]) {
rc = -ENOMEM;
goto out_nvdimm_events_map;
}
strcpy(statid, stat->stat_id);
p->nvdimm_events_map[count] = statid;
count++;
}
p->nvdimm_events_map[count] = NULL;
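The papr_scm change above swaps a kzalloc() + strcpy() pair for kmemdup_nul(), which copies a fixed number of bytes and appends a terminating NUL; that is the safe way to turn a fixed-width, not necessarily NUL-terminated identifier into a C string. A rough sketch of what kmemdup_nul(src, len, gfp) does internally (illustrative only, not part of the patch):

#include <linux/slab.h>
#include <linux/string.h>

static char *dup_fixed_width_id(const char *src, size_t len, gfp_t gfp)
{
	char *dst = kmalloc(len + 1, gfp);

	if (!dst)
		return NULL;
	memcpy(dst, src, len);	/* copy exactly len bytes, no strlen() */
	dst[len] = '\0';	/* always NUL-terminate */
	return dst;
}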


@ -27,22 +27,31 @@ struct vas_caps_entry {
/*
* This function is used to get the notification from the drmgr when
* QoS credits are changed. Though receiving the target total QoS
* credits here, get the official QoS capabilities from the hypervisor.
* QoS credits are changed.
*/
static ssize_t update_total_credits_trigger(struct vas_cop_feat_caps *caps,
static ssize_t update_total_credits_store(struct vas_cop_feat_caps *caps,
const char *buf, size_t count)
{
int err;
u16 creds;
err = kstrtou16(buf, 0, &creds);
/*
* The user space interface from the management console
* notifies OS with the new QoS credits and then the
* hypervisor. So OS has to use this new credits value
* and reconfigure VAS windows (close or reopen depends
* on the credits available) instead of depending on VAS
* QoS capabilities from the hypervisor.
*/
if (!err)
err = vas_reconfig_capabilties(caps->win_type);
err = vas_reconfig_capabilties(caps->win_type, creds);
if (err)
return -EINVAL;
pr_info("Set QoS total credits %u\n", creds);
return count;
}
@ -92,7 +101,7 @@ VAS_ATTR_RO(nr_total_credits);
VAS_ATTR_RO(nr_used_credits);
static struct vas_sysfs_entry update_total_credits_attribute =
__ATTR(update_total_credits, 0200, NULL, update_total_credits_trigger);
__ATTR(update_total_credits, 0200, NULL, update_total_credits_store);
static struct attribute *vas_def_capab_attrs[] = {
&nr_total_credits_attribute.attr,


@ -779,10 +779,10 @@ static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
* changes. Reconfig window configurations based on the credits
* availability from this new capabilities.
*/
int vas_reconfig_capabilties(u8 type)
int vas_reconfig_capabilties(u8 type, int new_nr_creds)
{
struct vas_cop_feat_caps *caps;
int old_nr_creds, new_nr_creds;
int old_nr_creds;
struct vas_caps *vcaps;
int rc = 0, nr_active_wins;
@ -795,12 +795,6 @@ int vas_reconfig_capabilties(u8 type)
caps = &vcaps->caps;
mutex_lock(&vas_pseries_mutex);
rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, vcaps->feat,
(u64)virt_to_phys(&hv_cop_caps));
if (rc)
goto out;
new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
old_nr_creds = atomic_read(&caps->nr_total_credits);
@ -832,7 +826,6 @@ int vas_reconfig_capabilties(u8 type)
false);
}
out:
mutex_unlock(&vas_pseries_mutex);
return rc;
}
@ -850,7 +843,7 @@ static int pseries_vas_notifier(struct notifier_block *nb,
struct of_reconfig_data *rd = data;
struct device_node *dn = rd->dn;
const __be32 *intserv = NULL;
int len, rc = 0;
int new_nr_creds, len, rc = 0;
if ((action == OF_RECONFIG_ATTACH_NODE) ||
(action == OF_RECONFIG_DETACH_NODE))
@ -862,7 +855,15 @@ static int pseries_vas_notifier(struct notifier_block *nb,
if (!intserv)
return NOTIFY_OK;
rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE);
rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
(u64)virt_to_phys(&hv_cop_caps));
if (!rc) {
new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE,
new_nr_creds);
}
if (rc)
pr_err("Failed reconfig VAS capabilities with DLPAR\n");


@ -135,7 +135,7 @@ struct pseries_vas_window {
};
int sysfs_add_vas_caps(struct vas_cop_feat_caps *caps);
int vas_reconfig_capabilties(u8 type);
int vas_reconfig_capabilties(u8 type, int new_nr_creds);
int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps);
#ifdef CONFIG_PPC_VAS


@ -208,8 +208,25 @@ static void __init setup_bootmem(void)
* early_init_fdt_reserve_self() since __pa() does
* not work for DTB pointers that are fixmap addresses
*/
if (!IS_ENABLED(CONFIG_BUILTIN_DTB))
memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
if (!IS_ENABLED(CONFIG_BUILTIN_DTB)) {
/*
* In case the DTB is not located in a memory region we won't
* be able to locate it later on via the linear mapping and
* get a segfault when accessing it via __va(dtb_early_pa).
* To avoid this situation copy DTB to a memory region.
* Note that memblock_phys_alloc will also reserve DTB region.
*/
if (!memblock_is_memory(dtb_early_pa)) {
size_t fdt_size = fdt_totalsize(dtb_early_va);
phys_addr_t new_dtb_early_pa = memblock_phys_alloc(fdt_size, PAGE_SIZE);
void *new_dtb_early_va = early_memremap(new_dtb_early_pa, fdt_size);
memcpy(new_dtb_early_va, dtb_early_va, fdt_size);
early_memunmap(new_dtb_early_va, fdt_size);
_dtb_early_pa = new_dtb_early_pa;
} else
memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
}
early_init_fdt_scan_reserved_mem();
dma_contiguous_reserve(dma32_phys_limit);


@ -30,6 +30,16 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -ge, 1200, y), y)
ifeq ($(call cc-ifversion, -lt, 1300, y), y)
KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds)
KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds)
endif
endif
endif
UTS_MACHINE := s390x
STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384)
CHECKFLAGS += -D__s390__ -D__s390x__


@ -2384,7 +2384,16 @@ static int kvm_s390_vm_mem_op(struct kvm *kvm, struct kvm_s390_mem_op *mop)
return -EINVAL;
if (mop->size > MEM_OP_MAX_SIZE)
return -E2BIG;
if (kvm_s390_pv_is_protected(kvm))
/*
* This is technically a heuristic only, if the kvm->lock is not
* taken, it is not guaranteed that the vm is/remains non-protected.
* This is ok from a kernel perspective, wrongdoing is detected
* on the access, -EFAULT is returned and the vm may crash the
* next time it accesses the memory in question.
* There is no sane usecase to do switching and a memop on two
* different CPUs at the same time.
*/
if (kvm_s390_pv_get_handle(kvm))
return -EINVAL;
if (mop->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION) {
if (access_key_invalid(mop->key))


@ -1183,6 +1183,7 @@ EXPORT_SYMBOL_GPL(gmap_read_table);
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
struct gmap_rmap *rmap)
{
struct gmap_rmap *temp;
void __rcu **slot;
BUG_ON(!gmap_is_shadow(sg));
@ -1190,6 +1191,12 @@ static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
if (slot) {
rmap->next = radix_tree_deref_slot_protected(slot,
&sg->guest_table_lock);
for (temp = rmap->next; temp; temp = temp->next) {
if (temp->raddr == rmap->raddr) {
kfree(rmap);
return;
}
}
radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
} else {
rmap->next = NULL;


@ -41,17 +41,7 @@ struct fpu_state_config fpu_user_cfg __ro_after_init;
*/
struct fpstate init_fpstate __ro_after_init;
/*
* Track whether the kernel is using the FPU state
* currently.
*
* This flag is used:
*
* - by IRQ context code to potentially use the FPU
* if it's unused.
*
* - to debug kernel_fpu_begin()/end() correctness
*/
/* Track in-kernel FPU usage */
static DEFINE_PER_CPU(bool, in_kernel_fpu);
/*
@ -59,42 +49,37 @@ static DEFINE_PER_CPU(bool, in_kernel_fpu);
*/
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
static bool kernel_fpu_disabled(void)
{
return this_cpu_read(in_kernel_fpu);
}
static bool interrupted_kernel_fpu_idle(void)
{
return !kernel_fpu_disabled();
}
/*
* Were we in user mode (or vm86 mode) when we were
* interrupted?
*
* Doing kernel_fpu_begin/end() is ok if we are running
* in an interrupt context from user mode - we'll just
* save the FPU state as required.
*/
static bool interrupted_user_mode(void)
{
struct pt_regs *regs = get_irq_regs();
return regs && user_mode(regs);
}
/*
* Can we use the FPU in kernel mode with the
* whole "kernel_fpu_begin/end()" sequence?
*
* It's always ok in process context (ie "not interrupt")
* but it is sometimes ok even from an irq.
*/
bool irq_fpu_usable(void)
{
return !in_interrupt() ||
interrupted_user_mode() ||
interrupted_kernel_fpu_idle();
if (WARN_ON_ONCE(in_nmi()))
return false;
/* In kernel FPU usage already active? */
if (this_cpu_read(in_kernel_fpu))
return false;
/*
* When not in NMI or hard interrupt context, FPU can be used in:
*
* - Task context except from within fpregs_lock()'ed critical
* regions.
*
* - Soft interrupt processing context which cannot happen
* while in a fpregs_lock()'ed critical region.
*/
if (!in_hardirq())
return true;
/*
* In hard interrupt context it's safe when soft interrupts
* are enabled, which means the interrupt did not hit in
* a fpregs_lock()'ed critical region.
*/
return !softirq_count();
}
EXPORT_SYMBOL(irq_fpu_usable);
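The rewritten irq_fpu_usable() above permits kernel-mode FPU use from task and softirq context, and from hard interrupt context only when softirqs were enabled at the time the interrupt hit (i.e. the interrupt did not land inside a fpregs_lock()'ed region). A minimal sketch of the expected calling pattern, using a hypothetical helper that is not part of this patch:

#include <linux/string.h>
#include <linux/types.h>
#include <asm/fpu/api.h>

static void example_clear_buf(void *dst, size_t len)
{
	if (!irq_fpu_usable()) {
		memset(dst, 0, len);	/* scalar fallback path */
		return;
	}

	kernel_fpu_begin();
	memset(dst, 0, len);	/* stand-in for SIMD-accelerated work */
	kernel_fpu_end();
}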


@ -887,6 +887,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
union cpuid10_eax eax;
union cpuid10_edx edx;
if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
}
perf_get_x86_pmu_capability(&cap);
/*


@ -473,30 +473,6 @@ static u64 __get_spte_lockless(u64 *sptep)
}
#endif
static bool spte_has_volatile_bits(u64 spte)
{
if (!is_shadow_present_pte(spte))
return false;
/*
* Always atomically update spte if it can be updated
* out of mmu-lock, it can ensure dirty bit is not lost,
* also, it can help us to get a stable is_writable_pte()
* to ensure tlb flush is not missed.
*/
if (spte_can_locklessly_be_made_writable(spte) ||
is_access_track_spte(spte))
return true;
if (spte_ad_enabled(spte)) {
if ((spte & shadow_accessed_mask) == 0 ||
(is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
return true;
}
return false;
}
/* Rules for using mmu_spte_set:
* Set the sptep from nonpresent to present.
* Note: the sptep being assigned *must* be either not present
@ -557,7 +533,7 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
* we always atomically update it, see the comments in
* spte_has_volatile_bits().
*/
if (spte_can_locklessly_be_made_writable(old_spte) &&
if (is_mmu_writable_spte(old_spte) &&
!is_writable_pte(new_spte))
flush = true;
@ -591,7 +567,8 @@ static int mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
u64 old_spte = *sptep;
int level = sptep_to_sp(sptep)->role.level;
if (!spte_has_volatile_bits(old_spte))
if (!is_shadow_present_pte(old_spte) ||
!spte_has_volatile_bits(old_spte))
__update_clear_spte_fast(sptep, 0ull);
else
old_spte = __update_clear_spte_slow(sptep, 0ull);
@ -1187,7 +1164,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
u64 spte = *sptep;
if (!is_writable_pte(spte) &&
!(pt_protect && spte_can_locklessly_be_made_writable(spte)))
!(pt_protect && is_mmu_writable_spte(spte)))
return false;
rmap_printk("spte %p %llx\n", sptep, *sptep);
@ -3196,8 +3173,7 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
* be removed in the fast path only if the SPTE was
* write-protected for dirty-logging or access tracking.
*/
if (fault->write &&
spte_can_locklessly_be_made_writable(spte)) {
if (fault->write && is_mmu_writable_spte(spte)) {
new_spte |= PT_WRITABLE_MASK;
/*


@ -90,6 +90,34 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
E820_TYPE_RAM);
}
/*
* Returns true if the SPTE has bits that may be set without holding mmu_lock.
* The caller is responsible for checking if the SPTE is shadow-present, and
* for determining whether or not the caller cares about non-leaf SPTEs.
*/
bool spte_has_volatile_bits(u64 spte)
{
/*
* Always atomically update spte if it can be updated
* out of mmu-lock, it can ensure dirty bit is not lost,
* also, it can help us to get a stable is_writable_pte()
* to ensure tlb flush is not missed.
*/
if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
return true;
if (is_access_track_spte(spte))
return true;
if (spte_ad_enabled(spte)) {
if (!(spte & shadow_accessed_mask) ||
(is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
return true;
}
return false;
}
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,


@ -390,7 +390,7 @@ static inline void check_spte_writable_invariants(u64 spte)
"kvm: Writable SPTE is not MMU-writable: %llx", spte);
}
static inline bool spte_can_locklessly_be_made_writable(u64 spte)
static inline bool is_mmu_writable_spte(u64 spte)
{
return spte & shadow_mmu_writable_mask;
}
@ -404,6 +404,8 @@ static inline u64 get_mmio_spte_generation(u64 spte)
return gen;
}
bool spte_has_volatile_bits(u64 spte);
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,


@ -6,6 +6,7 @@
#include <linux/kvm_host.h>
#include "mmu.h"
#include "spte.h"
/*
* TDP MMU SPTEs are RCU protected to allow paging structures (non-leaf SPTEs)
@ -17,9 +18,38 @@ static inline u64 kvm_tdp_mmu_read_spte(tdp_ptep_t sptep)
{
return READ_ONCE(*rcu_dereference(sptep));
}
static inline void kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 val)
static inline u64 kvm_tdp_mmu_write_spte_atomic(tdp_ptep_t sptep, u64 new_spte)
{
WRITE_ONCE(*rcu_dereference(sptep), val);
return xchg(rcu_dereference(sptep), new_spte);
}
static inline void __kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 new_spte)
{
WRITE_ONCE(*rcu_dereference(sptep), new_spte);
}
static inline u64 kvm_tdp_mmu_write_spte(tdp_ptep_t sptep, u64 old_spte,
u64 new_spte, int level)
{
/*
* Atomically write the SPTE if it is a shadow-present, leaf SPTE with
* volatile bits, i.e. has bits that can be set outside of mmu_lock.
* The Writable bit can be set by KVM's fast page fault handler, and
* Accessed and Dirty bits can be set by the CPU.
*
* Note, non-leaf SPTEs do have Accessed bits and those bits are
* technically volatile, but KVM doesn't consume the Accessed bit of
* non-leaf SPTEs, i.e. KVM doesn't care if it clobbers the bit. This
* logic needs to be reassessed if KVM were to use non-leaf Accessed
* bits, e.g. to skip stepping down into child SPTEs when aging SPTEs.
*/
if (is_shadow_present_pte(old_spte) && is_last_spte(old_spte, level) &&
spte_has_volatile_bits(old_spte))
return kvm_tdp_mmu_write_spte_atomic(sptep, new_spte);
__kvm_tdp_mmu_write_spte(sptep, new_spte);
return old_spte;
}
/*


@ -426,9 +426,9 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
tdp_mmu_unlink_sp(kvm, sp, shared);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
u64 *sptep = rcu_dereference(pt) + i;
tdp_ptep_t sptep = pt + i;
gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
u64 old_child_spte;
u64 old_spte;
if (shared) {
/*
@ -440,8 +440,8 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
* value to the removed SPTE value.
*/
for (;;) {
old_child_spte = xchg(sptep, REMOVED_SPTE);
if (!is_removed_spte(old_child_spte))
old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
if (!is_removed_spte(old_spte))
break;
cpu_relax();
}
@ -455,23 +455,43 @@ static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
* are guarded by the memslots generation, not by being
* unreachable.
*/
old_child_spte = READ_ONCE(*sptep);
if (!is_shadow_present_pte(old_child_spte))
old_spte = kvm_tdp_mmu_read_spte(sptep);
if (!is_shadow_present_pte(old_spte))
continue;
/*
* Marking the SPTE as a removed SPTE is not
* strictly necessary here as the MMU lock will
* stop other threads from concurrently modifying
* this SPTE. Using the removed SPTE value keeps
* the two branches consistent and simplifies
* the function.
* Use the common helper instead of a raw WRITE_ONCE as
* the SPTE needs to be updated atomically if it can be
* modified by a different vCPU outside of mmu_lock.
* Even though the parent SPTE is !PRESENT, the TLB
* hasn't yet been flushed, and both Intel and AMD
* document that A/D assists can use upper-level PxE
* entries that are cached in the TLB, i.e. the CPU can
* still access the page and mark it dirty.
*
* No retry is needed in the atomic update path as the
* sole concern is dropping a Dirty bit, i.e. no other
* task can zap/remove the SPTE as mmu_lock is held for
* write. Marking the SPTE as a removed SPTE is not
* strictly necessary for the same reason, but using
* the remove SPTE value keeps the shared/exclusive
* paths consistent and allows the handle_changed_spte()
* call below to hardcode the new value to REMOVED_SPTE.
*
* Note, even though dropping a Dirty bit is the only
* scenario where a non-atomic update could result in a
* functional bug, simply checking the Dirty bit isn't
* sufficient as a fast page fault could read the upper
* level SPTE before it is zapped, and then make this
* target SPTE writable, resume the guest, and set the
* Dirty bit between reading the SPTE above and writing
* it here.
*/
WRITE_ONCE(*sptep, REMOVED_SPTE);
old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
REMOVED_SPTE, level);
}
handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
old_child_spte, REMOVED_SPTE, level,
shared);
old_spte, REMOVED_SPTE, level, shared);
}
call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
@ -667,14 +687,13 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
KVM_PAGES_PER_HPAGE(iter->level));
/*
* No other thread can overwrite the removed SPTE as they
* must either wait on the MMU lock or use
* tdp_mmu_set_spte_atomic which will not overwrite the
* special removed SPTE value. No bookkeeping is needed
* here since the SPTE is going from non-present
* to non-present.
* No other thread can overwrite the removed SPTE as they must either
* wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
* overwrite the special removed SPTE value. No bookkeeping is needed
* here since the SPTE is going from non-present to non-present. Use
* the raw write helper to avoid an unnecessary check on volatile bits.
*/
kvm_tdp_mmu_write_spte(iter->sptep, 0);
__kvm_tdp_mmu_write_spte(iter->sptep, 0);
return 0;
}
@ -699,10 +718,13 @@ static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
* unless performing certain dirty logging operations.
* Leaving record_dirty_log unset in that case prevents page
* writes from being double counted.
*
* Returns the old SPTE value, which _may_ be different than @old_spte if the
* SPTE had voldatile bits.
*/
static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
u64 old_spte, u64 new_spte, gfn_t gfn, int level,
bool record_acc_track, bool record_dirty_log)
static u64 __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
u64 old_spte, u64 new_spte, gfn_t gfn, int level,
bool record_acc_track, bool record_dirty_log)
{
lockdep_assert_held_write(&kvm->mmu_lock);
@ -715,7 +737,7 @@ static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
*/
WARN_ON(is_removed_spte(old_spte) || is_removed_spte(new_spte));
kvm_tdp_mmu_write_spte(sptep, new_spte);
old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
__handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
@ -724,6 +746,7 @@ static void __tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
if (record_dirty_log)
handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
new_spte, level);
return old_spte;
}
static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
@ -732,9 +755,10 @@ static inline void _tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
{
WARN_ON_ONCE(iter->yielded);
__tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep, iter->old_spte,
new_spte, iter->gfn, iter->level,
record_acc_track, record_dirty_log);
iter->old_spte = __tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
iter->old_spte, new_spte,
iter->gfn, iter->level,
record_acc_track, record_dirty_log);
}
static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,


@ -45,6 +45,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
/* duplicated from amd_f17h_perfmon_event_map. */
static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
/* amd_pmc_perf_hw_id depends on these being the same size */
static_assert(ARRAY_SIZE(amd_event_mapping) ==
ARRAY_SIZE(amd_f17h_event_mapping));
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@ -140,6 +156,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
struct kvm_event_hw_type_mapping *event_mapping;
u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;
@ -148,15 +165,20 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
if (WARN_ON(pmc_is_fixed(pmc)))
return PERF_COUNT_HW_MAX;
if (guest_cpuid_family(pmc->vcpu) >= 0x17)
event_mapping = amd_f17h_event_mapping;
else
event_mapping = amd_event_mapping;
for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
if (amd_event_mapping[i].eventsel == event_select
&& amd_event_mapping[i].unit_mask == unit_mask)
if (event_mapping[i].eventsel == event_select
&& event_mapping[i].unit_mask == unit_mask)
break;
if (i == ARRAY_SIZE(amd_event_mapping))
return PERF_COUNT_HW_MAX;
return amd_event_mapping[i].event_type;
return event_mapping[i].event_type;
}
/* check if a PMC is enabled by comparing it against global_ctrl bits. Because


@ -1594,24 +1594,51 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
atomic_set_release(&src_sev->migration_in_progress, 0);
}
/* vCPU mutex subclasses. */
enum sev_migration_role {
SEV_MIGRATION_SOURCE = 0,
SEV_MIGRATION_TARGET,
SEV_NR_MIGRATION_ROLES,
};
static int sev_lock_vcpus_for_migration(struct kvm *kvm)
static int sev_lock_vcpus_for_migration(struct kvm *kvm,
enum sev_migration_role role)
{
struct kvm_vcpu *vcpu;
unsigned long i, j;
bool first = true;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (mutex_lock_killable(&vcpu->mutex))
if (mutex_lock_killable_nested(&vcpu->mutex, role))
goto out_unlock;
if (first) {
/*
* Reset the role to one that avoids colliding with
* the role used for the first vcpu mutex.
*/
role = SEV_NR_MIGRATION_ROLES;
first = false;
} else {
mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
}
}
return 0;
out_unlock:
first = true;
kvm_for_each_vcpu(j, vcpu, kvm) {
if (i == j)
break;
if (first)
first = false;
else
mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
mutex_unlock(&vcpu->mutex);
}
return -EINTR;
@ -1621,8 +1648,15 @@ static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;
bool first = true;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (first)
first = false;
else
mutex_acquire(&vcpu->mutex.dep_map,
SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
mutex_unlock(&vcpu->mutex);
}
}
@ -1748,10 +1782,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
charged = true;
}
ret = sev_lock_vcpus_for_migration(kvm);
ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
if (ret)
goto out_dst_cgroup;
ret = sev_lock_vcpus_for_migration(source_kvm);
ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
if (ret)
goto out_dst_vcpu;


@ -5472,7 +5472,7 @@ static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
return vmx->emulation_required && !vmx->rmode.vm86_active &&
vcpu->arch.exception.pending;
(vcpu->arch.exception.pending || vcpu->arch.exception.injected);
}
static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)


@ -50,7 +50,6 @@
#include "blk-pm.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
#include "blk-rq-qos.h"
struct dentry *blk_debugfs_root;
@ -315,9 +314,6 @@ void blk_cleanup_queue(struct request_queue *q)
*/
blk_freeze_queue(q);
/* cleanup rq qos structures for queue without disk */
rq_qos_exit(q);
blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
blk_sync_queue(q);


@ -303,6 +303,7 @@ static struct atari_floppy_struct {
int ref;
int type;
struct blk_mq_tag_set tag_set;
int error_count;
} unit[FD_MAX_UNITS];
#define UD unit[drive]
@ -705,14 +706,14 @@ static void fd_error( void )
if (!fd_request)
return;
fd_request->error_count++;
if (fd_request->error_count >= MAX_ERRORS) {
unit[SelectedDrive].error_count++;
if (unit[SelectedDrive].error_count >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
fd_end_request_cur(BLK_STS_IOERR);
finish_fdc();
return;
}
else if (fd_request->error_count == RECALIBRATE_ERRORS) {
else if (unit[SelectedDrive].error_count == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
@ -1491,7 +1492,7 @@ static void setup_req_params( int drive )
ReqData = ReqBuffer + 512 * ReqCnt;
if (UseTrackbuffer)
read_track = (ReqCmd == READ && fd_request->error_count == 0);
read_track = (ReqCmd == READ && unit[drive].error_count == 0);
else
read_track = 0;
@ -1520,6 +1521,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_RESOURCE;
}
fd_request = bd->rq;
unit[drive].error_count = 0;
blk_mq_start_request(fd_request);
atari_disable_irq( IRQ_MFP_FDC );


@ -509,8 +509,8 @@ static unsigned long fdc_busy;
static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_WAIT_QUEUE_HEAD(command_done);
/* Errors during formatting are counted here. */
static int format_errors;
/* errors encountered on the current (or last) request */
static int floppy_errors;
/* Format request descriptor. */
static struct format_descr format_req;
@ -530,7 +530,6 @@ static struct format_descr format_req;
static char *floppy_track_buffer;
static int max_buffer_sectors;
static int *errors;
typedef void (*done_f)(int);
static const struct cont_t {
void (*interrupt)(void);
@ -1455,7 +1454,7 @@ static int interpret_errors(void)
if (drive_params[current_drive].flags & FTD_MSG)
DPRINT("Over/Underrun - retrying\n");
bad = 0;
} else if (*errors >= drive_params[current_drive].max_errors.reporting) {
} else if (floppy_errors >= drive_params[current_drive].max_errors.reporting) {
print_errors();
}
if (reply_buffer[ST2] & ST2_WC || reply_buffer[ST2] & ST2_BC)
@ -2095,7 +2094,7 @@ static void bad_flp_intr(void)
if (!next_valid_format(current_drive))
return;
}
err_count = ++(*errors);
err_count = ++floppy_errors;
INFBOUND(write_errors[current_drive].badness, err_count);
if (err_count > drive_params[current_drive].max_errors.abort)
cont->done(0);
@ -2241,9 +2240,8 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
return -EINVAL;
}
format_req = *tmp_format_req;
format_errors = 0;
cont = &format_cont;
errors = &format_errors;
floppy_errors = 0;
ret = wait_til_done(redo_format, true);
if (ret == -EINTR)
return -EINTR;
@ -2759,10 +2757,11 @@ static int set_next_request(void)
current_req = list_first_entry_or_null(&floppy_reqs, struct request,
queuelist);
if (current_req) {
current_req->error_count = 0;
floppy_errors = 0;
list_del_init(&current_req->queuelist);
return 1;
}
return current_req != NULL;
return 0;
}
/* Starts or continues processing request. Will automatically unlock the
@ -2821,7 +2820,6 @@ static void redo_fd_request(void)
_floppy = floppy_type + drive_params[current_drive].autodetect[drive_state[current_drive].probed_format];
} else
probing = 0;
errors = &(current_req->error_count);
tmp = make_raw_rw_request();
if (tmp < 2) {
request_done(tmp);


@ -668,6 +668,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
void fw_core_remove_card(struct fw_card *card)
{
struct fw_card_driver dummy_driver = dummy_driver_template;
unsigned long flags;
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@ -682,7 +683,9 @@ void fw_core_remove_card(struct fw_card *card)
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
spin_lock_irqsave(&card->lock, flags);
fw_destroy_nodes(card);
spin_unlock_irqrestore(&card->lock, flags);
/* Wait for all users, especially device workqueue jobs, to finish. */
fw_card_put(card);


@ -1500,6 +1500,7 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
{
struct outbound_phy_packet_event *e =
container_of(packet, struct outbound_phy_packet_event, p);
struct client *e_client;
switch (status) {
/* expected: */
@ -1516,9 +1517,10 @@ static void outbound_phy_packet_callback(struct fw_packet *packet,
}
e->phy_packet.data[0] = packet->timestamp;
e_client = e->client;
queue_event(e->client, &e->event, &e->phy_packet,
sizeof(e->phy_packet) + e->phy_packet.length, NULL, 0);
client_put(e->client);
client_put(e_client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)


@ -375,16 +375,13 @@ static void report_found_node(struct fw_card *card,
card->bm_retries = 0;
}
/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
{
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
card->local_node = NULL;
spin_unlock_irqrestore(&card->lock, flags);
}
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
@ -510,6 +507,8 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
struct fw_node *local_node;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
/*
* If the selfID buffer is not the immediate successor of the
* previously processed one, we cannot reliably compare the
@ -521,8 +520,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
card->bm_retries = 0;
}
spin_lock_irqsave(&card->lock, flags);
card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
card->node_id = node_id;
/*


@ -73,24 +73,25 @@ static int try_cancel_split_timeout(struct fw_transaction *t)
static int close_transaction(struct fw_transaction *transaction,
struct fw_card *card, int rcode)
{
struct fw_transaction *t;
struct fw_transaction *t = NULL, *iter;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t == transaction) {
if (!try_cancel_split_timeout(t)) {
list_for_each_entry(iter, &card->transaction_list, link) {
if (iter == transaction) {
if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
list_del_init(&iter->link);
card->tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) {
if (t) {
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
@ -935,7 +936,7 @@ EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
struct fw_transaction *t;
struct fw_transaction *t = NULL, *iter;
unsigned long flags;
u32 *data;
size_t data_length;
@ -947,20 +948,21 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
rcode = HEADER_GET_RCODE(p->header[1]);
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(t, &card->transaction_list, link) {
if (t->node_id == source && t->tlabel == tlabel) {
if (!try_cancel_split_timeout(t)) {
list_for_each_entry(iter, &card->transaction_list, link) {
if (iter->node_id == source && iter->tlabel == tlabel) {
if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
list_del_init(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
list_del_init(&iter->link);
card->tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link == &card->transaction_list) {
if (!t) {
timed_out:
fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
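The firewire changes above (and the sbp2 change below) share one pattern: once list_for_each_entry() terminates without a break, the cursor no longer points at a valid element, so post-loop tests such as &t->link != &card->transaction_list are easy to get wrong. The fix iterates with a dedicated cursor and records the match in a separate pointer. A generic sketch of the pattern, with illustrative structure and field names:

#include <linux/list.h>

struct item {
	int id;
	struct list_head link;
};

static struct item *find_item(struct list_head *head, int id)
{
	struct item *found = NULL, *iter;

	list_for_each_entry(iter, head, link) {
		if (iter->id == id) {
			found = iter;	/* set only on a real match */
			break;
		}
	}

	/* NULL means "not found"; 'iter' must not be used past the loop. */
	return found;
}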


@ -408,7 +408,7 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
void *payload, size_t length, void *callback_data)
{
struct sbp2_logical_unit *lu = callback_data;
struct sbp2_orb *orb;
struct sbp2_orb *orb = NULL, *iter;
struct sbp2_status status;
unsigned long flags;
@ -433,17 +433,18 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
/* Lookup the orb corresponding to this status write. */
spin_lock_irqsave(&lu->tgt->lock, flags);
list_for_each_entry(orb, &lu->orb_list, link) {
list_for_each_entry(iter, &lu->orb_list, link) {
if (STATUS_GET_ORB_HIGH(status) == 0 &&
STATUS_GET_ORB_LOW(status) == orb->request_bus) {
orb->rcode = RCODE_COMPLETE;
list_del(&orb->link);
STATUS_GET_ORB_LOW(status) == iter->request_bus) {
iter->rcode = RCODE_COMPLETE;
list_del(&iter->link);
orb = iter;
break;
}
}
spin_unlock_irqrestore(&lu->tgt->lock, flags);
if (&orb->link != &lu->orb_list) {
if (orb) {
orb->callback(orb, &status);
kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {

View File

@ -871,13 +871,6 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
mvpwm->chip.dev = dev;
mvpwm->chip.ops = &mvebu_pwm_ops;
mvpwm->chip.npwm = mvchip->chip.ngpio;
/*
* There may already be some PWM allocated, so we can't force
* mvpwm->chip.base to a fixed point like mvchip->chip.base.
* So, we let pwmchip_add() do the numbering and take the next free
* region.
*/
mvpwm->chip.base = -1;
spin_lock_init(&mvpwm->lock);

View File

@ -762,11 +762,11 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
bitmap_xor(cur_stat, new_stat, old_stat, gc->ngpio);
bitmap_and(trigger, cur_stat, chip->irq_mask, gc->ngpio);
bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
if (bitmap_empty(trigger, gc->ngpio))
return false;
bitmap_copy(chip->irq_stat, new_stat, gc->ngpio);
bitmap_and(cur_stat, chip->irq_trig_fall, old_stat, gc->ngpio);
bitmap_and(old_stat, chip->irq_trig_raise, new_stat, gc->ngpio);
bitmap_or(new_stat, old_stat, cur_stat, gc->ngpio);

View File

@ -130,7 +130,6 @@ static int visconti_gpio_probe(struct platform_device *pdev)
struct gpio_irq_chip *girq;
struct irq_domain *parent;
struct device_node *irq_parent;
struct fwnode_handle *fwnode;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@ -150,14 +149,12 @@ static int visconti_gpio_probe(struct platform_device *pdev)
}
parent = irq_find_host(irq_parent);
of_node_put(irq_parent);
if (!parent) {
dev_err(dev, "No IRQ parent domain\n");
return -ENODEV;
}
fwnode = of_node_to_fwnode(irq_parent);
of_node_put(irq_parent);
ret = bgpio_init(&priv->gpio_chip, dev, 4,
priv->base + GPIO_IDATA,
priv->base + GPIO_OSET,
@ -180,7 +177,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
girq = &priv->gpio_chip.irq;
girq->chip = irq_chip;
girq->fwnode = fwnode;
girq->fwnode = of_node_to_fwnode(dev->of_node);
girq->parent_domain = parent;
girq->child_to_parent_hwirq = visconti_gpio_child_to_parent_hwirq;
girq->populate_parent_alloc_arg = visconti_gpio_populate_parent_fwspec;

View File

@ -910,7 +910,7 @@ static void of_gpiochip_init_valid_mask(struct gpio_chip *chip)
i, &start);
of_property_read_u32_index(np, "gpio-reserved-ranges",
i + 1, &count);
if (start >= chip->ngpio || start + count >= chip->ngpio)
if (start >= chip->ngpio || start + count > chip->ngpio)
continue;
bitmap_clear(chip->valid_mask, start, count);
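
The one-character change above tightens an off-by-one: a reserved range of count GPIOs starting at start fits a chip with ngpio lines only when start + count <= ngpio, so the old ">=" test wrongly rejected ranges ending exactly at the last line. A small stand-alone check expressing the same condition (names are illustrative, not the gpiolib API):

    #include <assert.h>
    #include <stdbool.h>

    /* A range [start, start + count) fits iff it ends at or before ngpio. */
    static bool range_fits(unsigned int start, unsigned int count,
                           unsigned int ngpio)
    {
        return start < ngpio && start + count <= ngpio;
    }

    int main(void)
    {
        /* 16-line chip: reserving lines 8..15 is valid ... */
        assert(range_fits(8, 8, 16));
        /* ... but the old test (start + count >= ngpio) rejected it. */
        assert(!range_fits(8, 9, 16));  /* would spill past line 15 */
        assert(!range_fits(16, 1, 16)); /* starts past the end */
        return 0;
    }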

View File

@ -24,6 +24,7 @@
#include <linux/module.h>
#include <drm/drm_drv.h>
#include <xen/xen.h>
#include "amdgpu.h"
#include "amdgpu_ras.h"
@ -710,7 +711,8 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
if (!reg) {
if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
/* passthrough mode exclus sriov mod */
if (is_virtual_machine() && !xen_initial_domain())
adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}

View File

@ -4440,7 +4440,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video
&dpcd_pattern_type.value,
sizeof(dpcd_pattern_type));
channel_count = dpcd_test_mode.bits.channel_count + 1;
channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
// read pattern periods for requested channels when sawTooth pattern is requested
if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||

View File

@ -78,6 +78,7 @@ config DRM_ITE_IT6505
tristate "ITE IT6505 DisplayPort bridge"
depends on OF
select DRM_KMS_HELPER
select DRM_DP_HELPER
select EXTCON
help
ITE IT6505 DisplayPort bridge chip driver.

View File

@ -580,12 +580,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
dp->dp_display.connector_type, state);
mutex_unlock(&dp->event_mutex);
/*
* add fail safe mode outside event_mutex scope
* to avoid potiential circular lock with drm thread
*/
dp_panel_add_fail_safe_mode(dp->dp_display.connector);
/* uevent will complete connection part */
return 0;
};

View File

@ -151,15 +151,6 @@ static int dp_panel_update_modes(struct drm_connector *connector,
return rc;
}
void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
{
/* fail safe edid */
mutex_lock(&connector->dev->mode_config.mutex);
if (drm_add_modes_noedid(connector, 640, 480))
drm_set_preferred_mode(connector, 640, 480);
mutex_unlock(&connector->dev->mode_config.mutex);
}
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
@ -215,8 +206,6 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
rc = -ETIMEDOUT;
goto end;
}
dp_panel_add_fail_safe_mode(connector);
}
if (panel->aux_cfg_update_done) {

View File

@ -59,7 +59,6 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
int dp_panel_deinit(struct dp_panel *dp_panel);
int dp_panel_timing_cfg(struct dp_panel *dp_panel);
void dp_panel_dump_regs(struct dp_panel *dp_panel);
void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
struct drm_connector *connector);
u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,

View File

@ -2308,10 +2308,8 @@ irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
return NULL;
}
static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
static void irdma_destroy_connection(struct irdma_cm_node *cm_node)
{
struct irdma_cm_node *cm_node =
container_of(rcu_head, struct irdma_cm_node, rcu_head);
struct irdma_cm_core *cm_core = cm_node->cm_core;
struct irdma_qp *iwqp;
struct irdma_cm_info nfo;
@ -2359,7 +2357,6 @@ static void irdma_cm_node_free_cb(struct rcu_head *rcu_head)
}
cm_core->cm_free_ah(cm_node);
kfree(cm_node);
}
/**
@ -2387,8 +2384,9 @@ void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
/* wait for all list walkers to exit their grace period */
call_rcu(&cm_node->rcu_head, irdma_cm_node_free_cb);
irdma_destroy_connection(cm_node);
kfree_rcu(cm_node, rcu_head);
}
/**
@ -3246,15 +3244,10 @@ int irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
*/
void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
{
unsigned long flags;
if (!cm_core)
return;
spin_lock_irqsave(&cm_core->ht_lock, flags);
if (timer_pending(&cm_core->tcp_timer))
del_timer_sync(&cm_core->tcp_timer);
spin_unlock_irqrestore(&cm_core->ht_lock, flags);
del_timer_sync(&cm_core->tcp_timer);
destroy_workqueue(cm_core->event_wq);
cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
@ -3467,12 +3460,6 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
}
cm_id = iwqp->cm_id;
/* make sure we havent already closed this connection */
if (!cm_id) {
spin_unlock_irqrestore(&iwqp->lock, flags);
return;
}
original_hw_tcp_state = iwqp->hw_tcp_state;
original_ibqp_state = iwqp->ibqp_state;
last_ae = iwqp->last_aeq;
@ -3494,11 +3481,11 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
disconn_status = -ECONNRESET;
}
if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
last_ae == IRDMA_AE_BAD_CLOSE ||
last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
last_ae == IRDMA_AE_BAD_CLOSE ||
last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) {
issue_close = 1;
iwqp->cm_id = NULL;
qp->term_flags = 0;

View File

@ -258,18 +258,16 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
u32 local_ipaddr[4] = {};
bool ipv4 = true;
real_dev = rdma_vlan_dev_real_dev(netdev);
if (!real_dev)
real_dev = netdev;
ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
if (!ibdev)
return NOTIFY_DONE;
iwdev = to_iwdev(ibdev);
switch (event) {
case NETEVENT_NEIGH_UPDATE:
real_dev = rdma_vlan_dev_real_dev(netdev);
if (!real_dev)
real_dev = netdev;
ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
if (!ibdev)
return NOTIFY_DONE;
iwdev = to_iwdev(ibdev);
p = (__be32 *)neigh->primary_key;
if (neigh->tbl->family == AF_INET6) {
ipv4 = false;
@ -290,13 +288,12 @@ int irdma_net_event(struct notifier_block *notifier, unsigned long event,
irdma_manage_arp_cache(iwdev->rf, neigh->ha,
local_ipaddr, ipv4,
IRDMA_ARP_DELETE);
ib_device_put(ibdev);
break;
default:
break;
}
ib_device_put(ibdev);
return NOTIFY_DONE;
}

View File

@ -1618,13 +1618,13 @@ int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
if (dont_wait) {
if (iwqp->cm_id && iwqp->hw_tcp_state) {
if (iwqp->hw_tcp_state) {
spin_lock_irqsave(&iwqp->lock, flags);
iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
iwqp->last_aeq = IRDMA_AE_RESET_SENT;
spin_unlock_irqrestore(&iwqp->lock, flags);
irdma_cm_disconn(iwqp);
}
irdma_cm_disconn(iwqp);
} else {
int close_timer_started;

View File

@ -38,13 +38,13 @@ static int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
}
/**
* rxe_mcast_delete - delete multicast address from rxe device
* rxe_mcast_del - delete multicast address from rxe device
* @rxe: rxe device object
* @mgid: multicast address as a gid
*
* Returns 0 on success else an error
*/
static int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
static int rxe_mcast_del(struct rxe_dev *rxe, union ib_gid *mgid)
{
unsigned char ll_addr[ETH_ALEN];
@ -143,11 +143,10 @@ static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
struct rxe_mcg *mcg;
unsigned long flags;
spin_lock_irqsave(&rxe->mcg_lock, flags);
spin_lock_bh(&rxe->mcg_lock);
mcg = __rxe_lookup_mcg(rxe, mgid);
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
spin_unlock_bh(&rxe->mcg_lock);
return mcg;
}
@ -159,17 +158,10 @@ struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
* @mcg: new mcg object
*
* Context: caller should hold rxe->mcg lock
* Returns: 0 on success else an error
*/
static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
struct rxe_mcg *mcg)
static void __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
struct rxe_mcg *mcg)
{
int err;
err = rxe_mcast_add(rxe, mgid);
if (unlikely(err))
return err;
kref_init(&mcg->ref_cnt);
memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
INIT_LIST_HEAD(&mcg->qp_list);
@ -184,8 +176,6 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
*/
kref_get(&mcg->ref_cnt);
__rxe_insert_mcg(mcg);
return 0;
}
/**
@ -198,7 +188,6 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
struct rxe_mcg *mcg, *tmp;
unsigned long flags;
int err;
if (rxe->attr.max_mcast_grp == 0)
@ -209,36 +198,38 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
if (mcg)
return mcg;
/* speculative alloc of new mcg */
mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
if (!mcg)
return ERR_PTR(-ENOMEM);
spin_lock_irqsave(&rxe->mcg_lock, flags);
/* re-check to see if someone else just added it */
tmp = __rxe_lookup_mcg(rxe, mgid);
if (tmp) {
kfree(mcg);
mcg = tmp;
goto out;
}
/* check to see if we have reached limit */
if (atomic_inc_return(&rxe->mcg_num) > rxe->attr.max_mcast_grp) {
err = -ENOMEM;
goto err_dec;
}
err = __rxe_init_mcg(rxe, mgid, mcg);
if (err)
goto err_dec;
out:
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
return mcg;
/* speculative alloc of new mcg */
mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
if (!mcg)
return ERR_PTR(-ENOMEM);
spin_lock_bh(&rxe->mcg_lock);
/* re-check to see if someone else just added it */
tmp = __rxe_lookup_mcg(rxe, mgid);
if (tmp) {
spin_unlock_bh(&rxe->mcg_lock);
atomic_dec(&rxe->mcg_num);
kfree(mcg);
return tmp;
}
__rxe_init_mcg(rxe, mgid, mcg);
spin_unlock_bh(&rxe->mcg_lock);
/* add mcast address outside of lock */
err = rxe_mcast_add(rxe, mgid);
if (!err)
return mcg;
kfree(mcg);
err_dec:
atomic_dec(&rxe->mcg_num);
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
kfree(mcg);
return ERR_PTR(err);
}
@ -268,7 +259,6 @@ static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
__rxe_remove_mcg(mcg);
kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
rxe_mcast_delete(mcg->rxe, &mcg->mgid);
atomic_dec(&rxe->mcg_num);
}
@ -280,11 +270,12 @@ static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
*/
static void rxe_destroy_mcg(struct rxe_mcg *mcg)
{
unsigned long flags;
/* delete mcast address outside of lock */
rxe_mcast_del(mcg->rxe, &mcg->mgid);
spin_lock_irqsave(&mcg->rxe->mcg_lock, flags);
spin_lock_bh(&mcg->rxe->mcg_lock);
__rxe_destroy_mcg(mcg);
spin_unlock_irqrestore(&mcg->rxe->mcg_lock, flags);
spin_unlock_bh(&mcg->rxe->mcg_lock);
}
/**
@ -339,25 +330,24 @@ static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
{
struct rxe_dev *rxe = mcg->rxe;
struct rxe_mca *mca, *tmp;
unsigned long flags;
int err;
/* check to see if the qp is already a member of the group */
spin_lock_irqsave(&rxe->mcg_lock, flags);
spin_lock_bh(&rxe->mcg_lock);
list_for_each_entry(mca, &mcg->qp_list, qp_list) {
if (mca->qp == qp) {
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
spin_unlock_bh(&rxe->mcg_lock);
return 0;
}
}
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
spin_unlock_bh(&rxe->mcg_lock);
/* speculative alloc new mca without using GFP_ATOMIC */
mca = kzalloc(sizeof(*mca), GFP_KERNEL);
if (!mca)
return -ENOMEM;
spin_lock_irqsave(&rxe->mcg_lock, flags);
spin_lock_bh(&rxe->mcg_lock);
/* re-check to see if someone else just attached qp */
list_for_each_entry(tmp, &mcg->qp_list, qp_list) {
if (tmp->qp == qp) {
@ -371,7 +361,7 @@ static int rxe_attach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
if (err)
kfree(mca);
out:
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
spin_unlock_bh(&rxe->mcg_lock);
return err;
}
@ -405,9 +395,8 @@ static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
{
struct rxe_dev *rxe = mcg->rxe;
struct rxe_mca *mca, *tmp;
unsigned long flags;
spin_lock_irqsave(&rxe->mcg_lock, flags);
spin_lock_bh(&rxe->mcg_lock);
list_for_each_entry_safe(mca, tmp, &mcg->qp_list, qp_list) {
if (mca->qp == qp) {
__rxe_cleanup_mca(mca, mcg);
@ -421,13 +410,13 @@ static int rxe_detach_mcg(struct rxe_mcg *mcg, struct rxe_qp *qp)
if (atomic_read(&mcg->qp_num) <= 0)
__rxe_destroy_mcg(mcg);
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
spin_unlock_bh(&rxe->mcg_lock);
return 0;
}
}
/* we didn't find the qp on the list */
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
spin_unlock_bh(&rxe->mcg_lock);
return -EINVAL;
}
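
Several of the rxe hunks above move blocking work (the GFP_KERNEL allocation, the multicast address add/delete) outside the lock and then re-check under the lock whether another thread won the race. A minimal pthread sketch of that "speculative allocate, re-check, free the loser" pattern, with illustrative names rather than the rxe structures:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct group {
        char name[32];
        struct group *next;
    };

    static struct group *groups;
    static pthread_mutex_t groups_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct group *lookup(const char *name) /* caller holds groups_lock */
    {
        struct group *g;

        for (g = groups; g; g = g->next)
            if (!strcmp(g->name, name))
                return g;
        return NULL;
    }

    static struct group *get_group(const char *name)
    {
        struct group *g, *tmp;

        /* Speculative allocation - may sleep, so done before locking. */
        g = calloc(1, sizeof(*g));
        if (!g)
            return NULL;

        pthread_mutex_lock(&groups_lock);
        tmp = lookup(name);             /* re-check: did someone beat us? */
        if (tmp) {
            pthread_mutex_unlock(&groups_lock);
            free(g);                    /* drop the speculative copy */
            return tmp;
        }
        snprintf(g->name, sizeof(g->name), "%s", name);
        g->next = groups;
        groups = g;
        pthread_mutex_unlock(&groups_lock);
        return g;
    }

    int main(void)
    {
        printf("%d\n", get_group("mcast0") == get_group("mcast0")); /* 1 */
        return 0;
    }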

View File

@ -680,6 +680,11 @@ static struct resp_res *rxe_prepare_read_res(struct rxe_qp *qp,
* It is assumed that the access permissions if originally good
* are OK and the mappings to be unchanged.
*
* TODO: If someone reregisters an MR to change its size or
* access permissions during the processing of an RDMA read
* we should kill the responder resource and complete the
* operation with an error.
*
* Return: mr on success else NULL
*/
static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
@ -690,23 +695,27 @@ static struct rxe_mr *rxe_recheck_mr(struct rxe_qp *qp, u32 rkey)
if (rkey_is_mw(rkey)) {
mw = rxe_pool_get_index(&rxe->mw_pool, rkey >> 8);
if (!mw || mw->rkey != rkey)
if (!mw)
return NULL;
if (mw->state != RXE_MW_STATE_VALID) {
mr = mw->mr;
if (mw->rkey != rkey || mw->state != RXE_MW_STATE_VALID ||
!mr || mr->state != RXE_MR_STATE_VALID) {
rxe_put(mw);
return NULL;
}
mr = mw->mr;
rxe_get(mr);
rxe_put(mw);
} else {
mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
if (!mr || mr->rkey != rkey)
return NULL;
return mr;
}
if (mr->state != RXE_MR_STATE_VALID) {
mr = rxe_pool_get_index(&rxe->mr_pool, rkey >> 8);
if (!mr)
return NULL;
if (mr->rkey != rkey || mr->state != RXE_MR_STATE_VALID) {
rxe_put(mr);
return NULL;
}
@ -736,8 +745,14 @@ static enum resp_states read_reply(struct rxe_qp *qp,
}
if (res->state == rdatm_res_state_new) {
mr = qp->resp.mr;
qp->resp.mr = NULL;
if (!res->replay) {
mr = qp->resp.mr;
qp->resp.mr = NULL;
} else {
mr = rxe_recheck_mr(qp, res->read.rkey);
if (!mr)
return RESPST_ERR_RKEY_VIOLATION;
}
if (res->read.resid <= mtu)
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;

View File

@ -968,14 +968,15 @@ static void siw_accept_newconn(struct siw_cep *cep)
siw_cep_set_inuse(new_cep);
rv = siw_proc_mpareq(new_cep);
siw_cep_set_free(new_cep);
if (rv != -EAGAIN) {
siw_cep_put(cep);
new_cep->listen_cep = NULL;
if (rv)
if (rv) {
siw_cep_set_free(new_cep);
goto error;
}
}
siw_cep_set_free(new_cep);
}
return;

View File

@ -1384,13 +1384,17 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
/*
* Bump to HS timing and frequency. Some cards don't handle
* SEND_STATUS reliably at the initial frequency.
*/
mmc_set_timing(host, MMC_TIMING_MMC_HS);
mmc_set_bus_speed(card);
err = mmc_switch_status(card, true);
if (err)
goto out_err;
mmc_set_clock(host, card->ext_csd.hs_max_dtr);
/* Switch card to DDR with strobe bit */
val = EXT_CSD_DDR_BUS_WIDTH_8 | EXT_CSD_BUS_WIDTH_STROBE;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@ -1448,7 +1452,7 @@ static int mmc_select_hs400es(struct mmc_card *card)
static int mmc_select_hs200(struct mmc_card *card)
{
struct mmc_host *host = card->host;
unsigned int old_timing, old_signal_voltage;
unsigned int old_timing, old_signal_voltage, old_clock;
int err = -EINVAL;
u8 val;
@ -1479,8 +1483,17 @@ static int mmc_select_hs200(struct mmc_card *card)
false, true, MMC_CMD_RETRIES);
if (err)
goto err;
/*
* Bump to HS timing and frequency. Some cards don't handle
* SEND_STATUS reliably at the initial frequency.
* NB: We can't move to full (HS200) speeds until after we've
* successfully switched over.
*/
old_timing = host->ios.timing;
old_clock = host->ios.clock;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
mmc_set_clock(card->host, card->ext_csd.hs_max_dtr);
/*
* For HS200, CRC errors are not a reliable way to know the
@ -1493,8 +1506,10 @@ static int mmc_select_hs200(struct mmc_card *card)
* mmc_select_timing() assumes timing has not changed if
* it is a switch error.
*/
if (err == -EBADMSG)
if (err == -EBADMSG) {
mmc_set_clock(host, old_clock);
mmc_set_timing(host, old_timing);
}
}
err:
if (err) {

View File

@ -17,6 +17,7 @@
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
#include "sdhci-pltfm.h"
#include "cqhci.h"
@ -2482,6 +2483,43 @@ static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
}
static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
{
struct reset_control *reset;
int ret = 0;
reset = reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(reset))
return dev_err_probe(dev, PTR_ERR(reset),
"unable to acquire core_reset\n");
if (!reset)
return ret;
ret = reset_control_assert(reset);
if (ret) {
reset_control_put(reset);
return dev_err_probe(dev, ret, "core_reset assert failed\n");
}
/*
* The hardware requirement for delay between assert/deassert
* is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
* ~125us (4/32768). To be on the safe side add 200us delay.
*/
usleep_range(200, 210);
ret = reset_control_deassert(reset);
if (ret) {
reset_control_put(reset);
return dev_err_probe(dev, ret, "core_reset deassert failed\n");
}
usleep_range(200, 210);
reset_control_put(reset);
return ret;
}
static int sdhci_msm_probe(struct platform_device *pdev)
{
@ -2529,6 +2567,10 @@ static int sdhci_msm_probe(struct platform_device *pdev)
msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
ret = sdhci_msm_gcc_reset(&pdev->dev, host);
if (ret)
goto pltfm_free;
/* Setup SDCC bus voter clock. */
msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
if (!IS_ERR(msm_host->bus_clk)) {

View File

@ -377,8 +377,9 @@ static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host,
pdes[i].buf_addr_ptr1 =
cpu_to_le32(sg_dma_address(&data->sg[i]) >>
host->cfg->idma_des_shift);
pdes[i].buf_addr_ptr2 = cpu_to_le32((u32)next_desc >>
host->cfg->idma_des_shift);
pdes[i].buf_addr_ptr2 =
cpu_to_le32(next_desc >>
host->cfg->idma_des_shift);
}
pdes[0].config |= cpu_to_le32(SDXC_IDMAC_DES0_FD);

View File

@ -809,6 +809,9 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg, offset;
if (priv->wol_ports_mask & BIT(port))
return;
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
if (priv->type == BCM4908_DEVICE_ID ||
priv->type == BCM7445_DEVICE_ID)

View File

@ -322,6 +322,7 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
struct ocelot_vcap_block *block_vcap_is2;
struct ocelot_vcap_filter *trap;
enum ocelot_mask_mode mask_mode;
unsigned long port_mask;
@ -341,9 +342,13 @@ static int felix_update_trapping_destinations(struct dsa_switch *ds,
/* We are sure that "cpu" was found, otherwise
* dsa_tree_setup_default_cpu() would have failed earlier.
*/
block_vcap_is2 = &ocelot->block[VCAP_IS2];
/* Make sure all traps are set up for that destination */
list_for_each_entry(trap, &ocelot->traps, trap_list) {
list_for_each_entry(trap, &block_vcap_is2->rules, list) {
if (!trap->is_trap)
continue;
/* Figure out the current trapping destination */
if (using_tag_8021q) {
/* Redirect to the tag_8021q CPU port. If timestamps

View File

@ -449,7 +449,7 @@ static int aq_pm_freeze(struct device *dev)
static int aq_pm_suspend_poweroff(struct device *dev)
{
return aq_suspend_common(dev, false);
return aq_suspend_common(dev, true);
}
static int aq_pm_thaw(struct device *dev)
@ -459,7 +459,7 @@ static int aq_pm_thaw(struct device *dev)
static int aq_pm_resume_restore(struct device *dev)
{
return atl_resume_common(dev, false);
return atl_resume_common(dev, true);
}
static const struct dev_pm_ops aq_pm_ops = {

View File

@ -544,7 +544,6 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
int *work_done, int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
bool is_rsc_completed = true;
int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
@ -562,12 +561,17 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
continue;
if (!buff->is_eop) {
unsigned int frag_cnt = 0U;
buff_ = buff;
do {
bool is_rsc_completed = true;
if (buff_->next >= self->size) {
err = -EIO;
goto err_exit;
}
frag_cnt++;
next_ = buff_->next,
buff_ = &self->buff_ring[next_];
is_rsc_completed =
@ -575,18 +579,17 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
next_,
self->hw_head);
if (unlikely(!is_rsc_completed))
break;
if (unlikely(!is_rsc_completed) ||
frag_cnt > MAX_SKB_FRAGS) {
err = 0;
goto err_exit;
}
buff->is_error |= buff_->is_error;
buff->is_cso_err |= buff_->is_cso_err;
} while (!buff_->is_eop);
if (!is_rsc_completed) {
err = 0;
goto err_exit;
}
if (buff->is_error ||
(buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
@ -644,7 +647,7 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
ALIGN(hdr_len, sizeof(long)));
if (buff->len - hdr_len > 0) {
skb_add_rx_frag(skb, 0, buff->rxdata.page,
skb_add_rx_frag(skb, i++, buff->rxdata.page,
buff->rxdata.pg_off + hdr_len,
buff->len - hdr_len,
self->frame_max);
@ -653,7 +656,6 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
if (!buff->is_eop) {
buff_ = buff;
i = 1U;
do {
next_ = buff_->next;
buff_ = &self->buff_ring[next_];

View File

@ -889,6 +889,13 @@ int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
err = -ENXIO;
goto err_exit;
}
/* Validate that the new hw_head_ is reasonable. */
if (hw_head_ >= ring->size) {
err = -ENXIO;
goto err_exit;
}
ring->hw_head = hw_head_;
err = aq_hw_err_from_flags(self);

View File

@ -3998,6 +3998,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
priv->wol_irq = platform_get_irq_optional(pdev, 2);
if (priv->wol_irq == -EPROBE_DEFER) {
err = priv->wol_irq;
goto err;
}
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {

View File

@ -2793,14 +2793,14 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
goto out;
na = ret;
memcpy(p->id, vpd + id, min_t(int, id_len, ID_LEN));
memcpy(p->id, vpd + id, min_t(unsigned int, id_len, ID_LEN));
strim(p->id);
memcpy(p->sn, vpd + sn, min_t(int, sn_len, SERNUM_LEN));
memcpy(p->sn, vpd + sn, min_t(unsigned int, sn_len, SERNUM_LEN));
strim(p->sn);
memcpy(p->pn, vpd + pn, min_t(int, pn_len, PN_LEN));
memcpy(p->pn, vpd + pn, min_t(unsigned int, pn_len, PN_LEN));
strim(p->pn);
memcpy(p->na, vpd + na, min_t(int, na_len, MACADDR_LEN));
strim((char *)p->na);
memcpy(p->na, vpd + na, min_t(unsigned int, na_len, MACADDR_LEN));
strim(p->na);
out:
vfree(vpd);
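
A plausible reading of the hunk above: the VPD field lengths are unsigned, and forcing them through min_t(int, ...) lets an out-of-range value compare as negative and win the min(), which memcpy() would then reinterpret as a huge size_t. A small illustration of the cast behaviour on a typical two's-complement build (MIN_T below mimics the kernel's min_t; it is not the kernel header):

    #include <stdio.h>

    #define MIN_T(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        unsigned int len = 0x80000010u;     /* bogus, larger than INT_MAX */

        /* As int, len converts to a negative value and "wins" ... */
        printf("%d\n", MIN_T(int, len, 16));
        /* ... as unsigned int the bound of 16 wins, as intended. */
        printf("%u\n", MIN_T(unsigned int, len, 16));   /* 16 */
        return 0;
    }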

View File

@ -1399,8 +1399,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
if (!dev)
if (!dev) {
pci_disable_device(pdev);
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
@ -1785,6 +1787,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_free_netdev:
free_netdev (dev);
pci_disable_device(pdev);
return -ENODEV;
}

View File

@ -7554,42 +7554,43 @@ static void i40e_free_macvlan_channels(struct i40e_vsi *vsi)
static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
struct i40e_fwd_adapter *fwd)
{
struct i40e_channel *ch = NULL, *ch_tmp, *iter;
int ret = 0, num_tc = 1, i, aq_err;
struct i40e_channel *ch, *ch_tmp;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
if (list_empty(&vsi->macvlan_list))
return -EINVAL;
/* Go through the list and find an available channel */
list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) {
if (!i40e_is_channel_macvlan(ch)) {
ch->fwd = fwd;
list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) {
if (!i40e_is_channel_macvlan(iter)) {
iter->fwd = fwd;
/* record configuration for macvlan interface in vdev */
for (i = 0; i < num_tc; i++)
netdev_bind_sb_channel_queue(vsi->netdev, vdev,
i,
ch->num_queue_pairs,
ch->base_queue);
for (i = 0; i < ch->num_queue_pairs; i++) {
iter->num_queue_pairs,
iter->base_queue);
for (i = 0; i < iter->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
u16 pf_q;
pf_q = ch->base_queue + i;
pf_q = iter->base_queue + i;
/* Get to TX ring ptr */
tx_ring = vsi->tx_rings[pf_q];
tx_ring->ch = ch;
tx_ring->ch = iter;
/* Get the RX ring ptr */
rx_ring = vsi->rx_rings[pf_q];
rx_ring->ch = ch;
rx_ring->ch = iter;
}
ch = iter;
break;
}
}
if (!ch)
return -EINVAL;
/* Guarantee all rings are updated before we update the
* MAC address filter.
*/

View File

@ -540,6 +540,7 @@ struct ice_pf {
struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
struct mutex adev_mutex; /* lock to protect aux device access */
u32 msg_enable;
struct ice_ptp ptp;
struct tty_driver *ice_gnss_tty_driver;

View File

@ -37,14 +37,17 @@ void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
if (WARN_ON_ONCE(!in_task()))
return;
mutex_lock(&pf->adev_mutex);
if (!pf->adev)
return;
goto finish;
device_lock(&pf->adev->dev);
iadrv = ice_get_auxiliary_drv(pf);
if (iadrv && iadrv->event_handler)
iadrv->event_handler(pf, event);
device_unlock(&pf->adev->dev);
finish:
mutex_unlock(&pf->adev_mutex);
}
/**
@ -275,7 +278,6 @@ int ice_plug_aux_dev(struct ice_pf *pf)
return -ENOMEM;
adev = &iadev->adev;
pf->adev = adev;
iadev->pf = pf;
adev->id = pf->aux_idx;
@ -285,18 +287,20 @@ int ice_plug_aux_dev(struct ice_pf *pf)
ret = auxiliary_device_init(adev);
if (ret) {
pf->adev = NULL;
kfree(iadev);
return ret;
}
ret = auxiliary_device_add(adev);
if (ret) {
pf->adev = NULL;
auxiliary_device_uninit(adev);
return ret;
}
mutex_lock(&pf->adev_mutex);
pf->adev = adev;
mutex_unlock(&pf->adev_mutex);
return 0;
}
@ -305,12 +309,17 @@ int ice_plug_aux_dev(struct ice_pf *pf)
*/
void ice_unplug_aux_dev(struct ice_pf *pf)
{
if (!pf->adev)
return;
struct auxiliary_device *adev;
auxiliary_device_delete(pf->adev);
auxiliary_device_uninit(pf->adev);
mutex_lock(&pf->adev_mutex);
adev = pf->adev;
pf->adev = NULL;
mutex_unlock(&pf->adev_mutex);
if (adev) {
auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
}
}
/**
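
The ice hunks above keep one rule for the auxiliary-device pointer: it is published and consumed only under adev_mutex, and the teardown runs after the pointer has been detached, outside the lock. A compact pthread sketch of that publish/steal pattern (the names and the destroy step are illustrative, not the ice driver's API):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct device { int id; } *the_dev;  /* protected by dev_lock */

    static void publish(struct device *dev)
    {
        pthread_mutex_lock(&dev_lock);
        the_dev = dev;              /* visible to users only when ready */
        pthread_mutex_unlock(&dev_lock);
    }

    static void notify(void)
    {
        pthread_mutex_lock(&dev_lock);
        if (the_dev)                /* consumer checks under the lock */
            printf("event for dev %d\n", the_dev->id);
        pthread_mutex_unlock(&dev_lock);
    }

    static void unplug(void)
    {
        struct device *dev;

        pthread_mutex_lock(&dev_lock);
        dev = the_dev;              /* steal the pointer ... */
        the_dev = NULL;
        pthread_mutex_unlock(&dev_lock);

        if (dev)                    /* ... and destroy it outside the lock */
            free(dev);
    }

    int main(void)
    {
        struct device *dev = malloc(sizeof(*dev));

        if (!dev)
            return 1;
        dev->id = 1;
        publish(dev);
        notify();
        unplug();
        notify();                   /* safely prints nothing */
        return 0;
    }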

View File

@ -3785,6 +3785,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
@ -3863,6 +3864,7 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
mutex_init(&pf->adev_mutex);
INIT_HLIST_HEAD(&pf->aq_wait_list);
spin_lock_init(&pf->aq_wait_lock);

View File

@ -2287,6 +2287,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
/**
* ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
* @hw: pointer to the hw struct
* @tx: PTP Tx tracker to clean up
*
* Loop through the Tx timestamp requests and see if any of them have been
@ -2295,7 +2296,7 @@ ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
* timestamp will never be captured. This might happen if the packet gets
* discarded before it reaches the PHY timestamping block.
*/
static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
{
u8 idx;
@ -2304,11 +2305,16 @@ static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
for_each_set_bit(idx, tx->in_use, tx->len) {
struct sk_buff *skb;
u64 raw_tstamp;
/* Check if this SKB has been waiting for too long */
if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
continue;
/* Read tstamp to be able to use this register again */
ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
&raw_tstamp);
spin_lock(&tx->lock);
skb = tx->tstamps[idx].skb;
tx->tstamps[idx].skb = NULL;
@ -2330,7 +2336,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
ice_ptp_update_cached_phctime(pf);
ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
/* Run twice a second */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,

View File

@ -1289,13 +1289,52 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
NULL, 0);
}
/**
* ice_vf_vsi_dis_single_txq - disable a single Tx queue
* @vf: VF to disable queue for
* @vsi: VSI for the VF
* @q_id: VF relative (0-based) queue ID
*
* Attempt to disable the Tx queue passed in. If the Tx queue was successfully
* disabled then clear q_id bit in the enabled queues bitmap and return
* success. Otherwise return error.
*/
static int
ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
{
struct ice_txq_meta txq_meta = { 0 };
struct ice_tx_ring *ring;
int err;
if (!test_bit(q_id, vf->txq_ena))
dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
q_id, vsi->vsi_num);
ring = vsi->tx_rings[q_id];
if (!ring)
return -EINVAL;
ice_fill_txq_meta(vsi, ring, &txq_meta);
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
q_id, vsi->vsi_num);
return err;
}
/* Clear enabled queues flag */
clear_bit(q_id, vf->txq_ena);
return 0;
}
/**
* ice_vc_dis_qs_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
* called from the VF to disable all or specific
* queue(s)
* called from the VF to disable all or specific queue(s)
*/
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
@ -1332,30 +1371,15 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
struct ice_txq_meta txq_meta = { 0 };
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!test_bit(vf_q_id, vf->txq_ena))
dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
vf_q_id, vsi->vsi_num);
ice_fill_txq_meta(vsi, ring, &txq_meta);
if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
ring, &txq_meta)) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* Clear enabled queues flag */
clear_bit(vf_q_id, vf->txq_ena);
}
}
@ -1604,6 +1628,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
if (qpi->txq.ring_len > 0) {
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
/* Disable any existing queue first */
if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* Configure a queue with the requested settings */
if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;

View File

@ -715,7 +715,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
int i, k;
memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
if (!IS_ENABLED(CONFIG_SOC_MT7621))
return;
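
The one-line mtk_ppe fix above is the classic sizeof(pointer) vs sizeof(element) mistake: multiplying the entry count by the size of the pointer clears only a fraction of the table. A trivial illustration with generic names, not the PPE structures:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct entry { unsigned int data[16]; };    /* 64 bytes per entry */

    int main(void)
    {
        size_t n = 128;
        struct entry *table = malloc(n * sizeof(*table));

        if (!table)
            return 1;

        memset(table, 0, n * sizeof(table));    /* wrong: n * 8 bytes on LP64 */
        memset(table, 0, n * sizeof(*table));   /* right: n * 64 bytes */

        printf("%zu vs %zu bytes cleared\n",
               n * sizeof(table), n * sizeof(*table));
        free(table);
        return 0;
    }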

View File

@ -568,10 +568,8 @@ static int
mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
struct __ip6_tnl_parm parms6;
parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, &parms6.raddr,
return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp,
&ipip_entry->parms.daddr.addr6,
&ipip_entry->dip_kvdl_index);
}
@ -579,10 +577,7 @@ static void
mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_entry *ipip_entry)
{
struct __ip6_tnl_parm parms6;
parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
mlxsw_sp_ipv6_addr_put(mlxsw_sp, &parms6.raddr);
mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipip_entry->parms.daddr.addr6);
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {

View File

@ -1622,7 +1622,7 @@ int ocelot_trap_add(struct ocelot *ocelot, int port,
trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
trap->action.port_mask = 0;
trap->take_ts = take_ts;
list_add_tail(&trap->trap_list, &ocelot->traps);
trap->is_trap = true;
new = true;
}
@ -1634,10 +1634,8 @@ int ocelot_trap_add(struct ocelot *ocelot, int port,
err = ocelot_vcap_filter_replace(ocelot, trap);
if (err) {
trap->ingress_port_mask &= ~BIT(port);
if (!trap->ingress_port_mask) {
list_del(&trap->trap_list);
if (!trap->ingress_port_mask)
kfree(trap);
}
return err;
}
@ -1657,11 +1655,8 @@ int ocelot_trap_del(struct ocelot *ocelot, int port, unsigned long cookie)
return 0;
trap->ingress_port_mask &= ~BIT(port);
if (!trap->ingress_port_mask) {
list_del(&trap->trap_list);
if (!trap->ingress_port_mask)
return ocelot_vcap_filter_del(ocelot, trap);
}
return ocelot_vcap_filter_replace(ocelot, trap);
}

Some files were not shown because too many files have changed in this diff.