Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

include/net/sock.h
  7b50ecfcc6 ("net: Rename ->stream_memory_read to ->sock_is_readable")
  4c1e34c0db ("vsock: Enable y2038 safe timeval for timeout")

drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
  0daa55d033 ("octeontx2-af: cn10k: debugfs for dumping LMTST map table")
  e77bcdd1f6 ("octeontx2-af: Display all enabled PF VF rsrc_alloc entries.")

Adjacent code additions in both cases; keep both.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

@ -32,13 +32,13 @@ properties:
"#size-cells":
const: 1
pinctrl:
$ref: ../pinctrl/brcm,ns-pinmux.yaml
patternProperties:
'^clock-controller@[a-f0-9]+$':
$ref: ../clock/brcm,iproc-clocks.yaml
'^pin-controller@[a-f0-9]+$':
$ref: ../pinctrl/brcm,ns-pinmux.yaml
'^thermal@[a-f0-9]+$':
$ref: ../thermal/brcm,ns-thermal.yaml
@ -73,9 +73,10 @@ examples:
"iprocfast", "sata1", "sata2";
};
pinctrl {
pin-controller@1c0 {
compatible = "brcm,bcm4708-pinmux";
offset = <0x1c0>;
reg = <0x1c0 0x24>;
reg-names = "cru_gpio_control";
};
thermal@2c0 {


@ -17,9 +17,6 @@ description:
A list of pins varies across chipsets so few bindings are available.
Node of the pinmux must be nested in the CRU (Central Resource Unit) "syscon"
node.
properties:
compatible:
enum:
@ -27,10 +24,11 @@ properties:
- brcm,bcm4709-pinmux
- brcm,bcm53012-pinmux
offset:
description: offset of pin registers in the CRU block
reg:
maxItems: 1
$ref: /schemas/types.yaml#/definitions/uint32-array
reg-names:
const: cru_gpio_control
patternProperties:
'-pins$':
@ -72,23 +70,20 @@ allOf:
uart1_grp ]
required:
- offset
- reg
- reg-names
additionalProperties: false
examples:
- |
cru@1800c100 {
compatible = "syscon", "simple-mfd";
reg = <0x1800c100 0x1a4>;
pin-controller@1800c1c0 {
compatible = "brcm,bcm4708-pinmux";
reg = <0x1800c1c0 0x24>;
reg-names = "cru_gpio_control";
pinctrl {
compatible = "brcm,bcm4708-pinmux";
offset = <0xc0>;
spi-pins {
function = "spi";
groups = "spi_grp";
};
spi-pins {
function = "spi";
groups = "spi_grp";
};
};


@ -104,6 +104,7 @@ Code Seq# Include File Comments
'8' all SNP8023 advanced NIC card
<mailto:mcr@solidum.com>
';' 64-7F linux/vfio.h
'=' 00-3f uapi/linux/ptp_clock.h <mailto:richardcochran@gmail.com>
'@' 00-0F linux/radeonfb.h conflict!
'@' 00-0F drivers/video/aty/aty128fb.c conflict!
'A' 00-1F linux/apm_bios.h conflict!


@ -5464,6 +5464,19 @@ F: include/net/devlink.h
F: include/uapi/linux/devlink.h
F: net/core/devlink.c
DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT
M: Christoph Niedermaier <cniedermaier@dh-electronics.com>
L: kernel@dh-electronics.com
S: Maintained
F: arch/arm/boot/dts/imx6*-dhcom-*
DH ELECTRONICS STM32MP1 DHCOM/DHCOR BOARD SUPPORT
M: Marek Vasut <marex@denx.de>
L: kernel@dh-electronics.com
S: Maintained
F: arch/arm/boot/dts/stm32mp1*-dhcom-*
F: arch/arm/boot/dts/stm32mp1*-dhcor-*
DIALOG SEMICONDUCTOR DRIVERS
M: Support Opensource <support.opensource@diasemi.com>
S: Supported
@ -11284,7 +11297,6 @@ F: Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
F: drivers/net/ethernet/marvell/octeontx2/af/
MARVELL PRESTERA ETHERNET SWITCH DRIVER
M: Vadym Kochan <vkochan@marvell.com>
M: Taras Chornyi <tchornyi@marvell.com>
S: Supported
W: https://github.com/Marvell-switching/switchdev-prestera
@ -20352,6 +20364,7 @@ X86 ARCHITECTURE (32-BIT AND 64-BIT)
M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
M: Borislav Petkov <bp@alien8.de>
M: Dave Hansen <dave.hansen@linux.intel.com>
M: x86@kernel.org
R: "H. Peter Anvin" <hpa@zytor.com>
L: linux-kernel@vger.kernel.org


@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Opossums on Parade
# *DOCUMENTATION*


@ -92,6 +92,7 @@ config ARM
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_IRQ_TIME_ACCOUNTING


@ -47,7 +47,10 @@ extern char * strchrnul(const char *, int);
#endif
#ifdef CONFIG_KERNEL_XZ
/* Prevent KASAN override of string helpers in decompressor */
#undef memmove
#define memmove memmove
#undef memcpy
#define memcpy memcpy
#include "../../../../lib/decompress_unxz.c"
#endif
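
For context, a minimal sketch of the preprocessor trick this hunk relies on (all names illustrative): the #undef drops whatever instrumented alias earlier headers installed, and the self-expanding #define makes the #ifndef guard around the decompressor's fallback definition evaluate false, all without touching a single call site.

    #undef memcpy            /* drop e.g. "#define memcpy __asan_memcpy" */
    #define memcpy memcpy    /* self-expanding: calls compile unchanged  */

    /* ...inside the included decompressor source... */
    #ifndef memcpy           /* now false, so the fallback is suppressed */
    static void *memcpy(void *d, const void *s, size_t n);
    #endif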


@ -112,7 +112,7 @@ &gmac {
pinctrl-names = "default";
pinctrl-0 = <&gmac_rgmii_pins>;
phy-handle = <&phy1>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
status = "okay";
};


@ -176,6 +176,7 @@ extern int __get_user_64t_4(void *);
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
unsigned int __ua_flags = uaccess_save_and_enable(); \
int __tmp_e; \
switch (sizeof(*(__p))) { \
case 1: \
if (sizeof((x)) >= 8) \
@ -203,9 +204,10 @@ extern int __get_user_64t_4(void *);
break; \
default: __e = __get_user_bad(); break; \
} \
__tmp_e = __e; \
uaccess_restore(__ua_flags); \
x = (typeof(*(p))) __r2; \
__e; \
__tmp_e; \
})
#define get_user(x, p) \
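
The new __tmp_e exists because __e is pinned to r0 and uaccess_restore() can expand to a real function call (e.g. under DEBUG_SPINLOCK) that clobbers r0, corrupting the returned error code. A reduced sketch of the hazard, with restore_state() standing in for uaccess_restore():

    #define CHECKED_READ() ({                                    \
            register int __e asm("r0"); /* set by the access */  \
            int __tmp_e;                                         \
            __tmp_e = __e;    /* park in a compiler temporary */ \
            restore_state();  /* may clobber r0, i.e. __e     */ \
            __tmp_e;          /* statement-expression result  */ \
    })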


@ -253,7 +253,7 @@ __create_page_tables:
add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
ldr r6, =(_end - 1)
adr_l r5, kernel_sec_start @ _pa(kernel_sec_start)
#ifdef CONFIG_CPU_ENDIAN_BE8
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
str r8, [r5, #4] @ Save physical start of kernel (BE)
#else
str r8, [r5] @ Save physical start of kernel (LE)
@ -266,7 +266,7 @@ __create_page_tables:
bls 1b
eor r3, r3, r7 @ Remove the MMU flags
adr_l r5, kernel_sec_end @ _pa(kernel_sec_end)
#ifdef CONFIG_CPU_ENDIAN_BE8
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
str r3, [r5, #4] @ Save physical end of kernel (BE)
#else
str r3, [r5] @ Save physical end of kernel (LE)


@ -136,7 +136,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
if (p >= bottom && p < top) {
unsigned long val;
if (get_kernel_nofault(val, (unsigned long *)p))
if (!get_kernel_nofault(val, (unsigned long *)p))
sprintf(str + i * 9, " %08lx", val);
else
sprintf(str + i * 9, " ????????");
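
get_kernel_nofault() follows the usual 0-on-success kernel convention (non-zero, e.g. -ERANGE, on a faulting access), so the old unnegated test printed "????????" for readable words and raw values for unreadable ones; the added ! restores the intended sense:

    unsigned long val;

    if (!get_kernel_nofault(val, (unsigned long *)p))
            pr_cont(" %08lx", val);   /* read succeeded       */
    else
            pr_cont(" ????????");     /* word is not readable */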


@ -40,6 +40,10 @@ SECTIONS
ARM_DISCARD
*(.alt.smp.init)
*(.pv_table)
#ifndef CONFIG_ARM_UNWIND
*(.ARM.exidx) *(.ARM.exidx.*)
*(.ARM.extab) *(.ARM.extab.*)
#endif
}
. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
@ -172,7 +176,7 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA")
#endif
#ifdef CONFIG_ARM_MPU
#if defined(CONFIG_ARM_MPU) && !defined(CONFIG_COMPILE_TEST)
/*
* Due to PMSAv7 restriction on base address and size we have to
* enforce minimal alignment restrictions. It was seen that weaker


@ -340,6 +340,7 @@ ENTRY(\name\()_cache_fns)
.macro define_tlb_functions name:req, flags_up:req, flags_smp
.type \name\()_tlb_fns, #object
.align 2
ENTRY(\name\()_tlb_fns)
.long \name\()_flush_user_tlb_range
.long \name\()_flush_kern_tlb_range


@ -439,7 +439,7 @@ static struct undef_hook kprobes_arm_break_hook = {
#endif /* !CONFIG_THUMB2_KERNEL */
int __init arch_init_kprobes()
int __init arch_init_kprobes(void)
{
arm_probes_decode_init();
#ifdef CONFIG_THUMB2_KERNEL


@ -75,7 +75,7 @@ &emac {
pinctrl-0 = <&emac_rgmii_pins>;
phy-supply = <&reg_gmac_3v3>;
phy-handle = <&ext_rgmii_phy>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
status = "okay";
};


@ -70,7 +70,9 @@ reg_rst_eth2: regulator-rst-eth2 {
regulator-name = "rst-usb-eth2";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usb_eth2>;
gpio = <&gpio3 2 GPIO_ACTIVE_LOW>;
gpio = <&gpio3 2 GPIO_ACTIVE_HIGH>;
enable-active-high;
regulator-always-on;
};
reg_vdd_5v: regulator-5v {
@ -95,7 +97,7 @@ can0: can@0 {
clocks = <&osc_can>;
interrupt-parent = <&gpio4>;
interrupts = <28 IRQ_TYPE_EDGE_FALLING>;
spi-max-frequency = <100000>;
spi-max-frequency = <10000000>;
vdd-supply = <&reg_vdd_3v3>;
xceiver-supply = <&reg_vdd_5v>;
};
@ -111,7 +113,7 @@ &ecspi3 {
&fec1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-connection-type = "rgmii";
phy-connection-type = "rgmii-rxid";
phy-handle = <&ethphy>;
status = "okay";


@ -91,10 +91,12 @@ regulators {
reg_vdd_soc: BUCK1 {
regulator-name = "buck1";
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <900000>;
regulator-max-microvolt = <850000>;
regulator-boot-on;
regulator-always-on;
regulator-ramp-delay = <3125>;
nxp,dvs-run-voltage = <850000>;
nxp,dvs-standby-voltage = <800000>;
};
reg_vdd_arm: BUCK2 {
@ -111,7 +113,7 @@ reg_vdd_arm: BUCK2 {
reg_vdd_dram: BUCK3 {
regulator-name = "buck3";
regulator-min-microvolt = <850000>;
regulator-max-microvolt = <900000>;
regulator-max-microvolt = <950000>;
regulator-boot-on;
regulator-always-on;
};
@ -150,7 +152,7 @@ reg_nvcc_snvs: LDO1 {
reg_vdd_snvs: LDO2 {
regulator-name = "ldo2";
regulator-min-microvolt = <850000>;
regulator-min-microvolt = <800000>;
regulator-max-microvolt = <900000>;
regulator-boot-on;
regulator-always-on;


@ -2590,9 +2590,10 @@ mdss: mdss@ae00000 {
power-domains = <&dispcc MDSS_GDSC>;
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
<&gcc GCC_DISP_HF_AXI_CLK>,
<&gcc GCC_DISP_SF_AXI_CLK>,
<&dispcc DISP_CC_MDSS_MDP_CLK>;
clock-names = "iface", "nrt_bus", "core";
clock-names = "iface", "bus", "nrt_bus", "core";
assigned-clocks = <&dispcc DISP_CC_MDSS_MDP_CLK>;
assigned-clock-rates = <460000000>;


@ -1136,6 +1136,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
return prog;
}
u64 bpf_jit_alloc_exec_limit(void)
{
return BPF_JIT_REGION_SIZE;
}
void *bpf_jit_alloc_exec(unsigned long size)
{
return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,


@ -6,7 +6,7 @@
#ifndef CONFIG_DYNAMIC_FTRACE
extern void (*ftrace_trace_function)(unsigned long, unsigned long,
struct ftrace_ops*, struct pt_regs*);
struct ftrace_ops*, struct ftrace_regs*);
extern void ftrace_graph_caller(void);
noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,


@ -37,6 +37,7 @@ config NIOS2_DTB_PHYS_ADDR
config NIOS2_DTB_SOURCE_BOOL
bool "Compile and link device tree into kernel image"
depends on !COMPILE_TEST
help
This allows you to specify a dts (device tree source) file
which will be compiled and linked into the kernel image.


@ -125,7 +125,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
if (i == NR_JIT_ITERATIONS) {
pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
bpf_jit_binary_free(jit_data->header);
if (jit_data->header)
bpf_jit_binary_free(jit_data->header);
prog = orig_prog;
goto out_offset;
}
@ -166,6 +167,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
return prog;
}
u64 bpf_jit_alloc_exec_limit(void)
{
return BPF_JIT_REGION_SIZE;
}
void *bpf_jit_alloc_exec(unsigned long size)
{
return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,


@ -702,7 +702,8 @@ struct kvm_vcpu_arch {
struct kvm_pio_request pio;
void *pio_data;
void *guest_ins_data;
void *sev_pio_data;
unsigned sev_pio_count;
u8 event_exit_inst_len;


@ -4596,10 +4596,10 @@ static void update_pkru_bitmask(struct kvm_mmu *mmu)
unsigned bit;
bool wp;
if (!is_cr4_pke(mmu)) {
mmu->pkru_mask = 0;
mmu->pkru_mask = 0;
if (!is_cr4_pke(mmu))
return;
}
wp = is_cr0_wp(mmu);


@ -1484,6 +1484,13 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
goto e_free_trans;
}
/*
* Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
* encrypts the written data with the guest's key, and the cache may
* contain dirty, unencrypted data.
*/
sev_clflush_pages(guest_page, n);
/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
data.guest_address |= sev_me_mask;


@ -6305,18 +6305,13 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
/*
* If we are running L2 and L1 has a new pending interrupt
* which can be injected, we should re-evaluate
* what should be done with this new L1 interrupt.
* If L1 intercepts external-interrupts, we should
* exit from L2 to L1. Otherwise, interrupt should be
* delivered directly to L2.
* which can be injected, this may cause a vmexit or it may
* be injected into L2. Either way, this interrupt will be
* processed via KVM_REQ_EVENT, not RVI, because we do not use
* virtual interrupt delivery to inject L1 interrupts into L2.
*/
if (is_guest_mode(vcpu) && max_irr_updated) {
if (nested_exit_on_intr(vcpu))
kvm_vcpu_exiting_guest_mode(vcpu);
else
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
if (is_guest_mode(vcpu) && max_irr_updated)
kvm_make_request(KVM_REQ_EVENT, vcpu);
} else {
max_irr = kvm_lapic_find_highest_irr(vcpu);
}


@ -6906,7 +6906,7 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
}
static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
unsigned short port, void *val,
unsigned short port,
unsigned int count, bool in)
{
vcpu->arch.pio.port = port;
@ -6914,10 +6914,8 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
vcpu->arch.pio.count = count;
vcpu->arch.pio.size = size;
if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
vcpu->arch.pio.count = 0;
if (!kernel_pio(vcpu, vcpu->arch.pio_data))
return 1;
}
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
@ -6929,26 +6927,39 @@ static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
return 0;
}
static int __emulator_pio_in(struct kvm_vcpu *vcpu, int size,
unsigned short port, unsigned int count)
{
WARN_ON(vcpu->arch.pio.count);
memset(vcpu->arch.pio_data, 0, size * count);
return emulator_pio_in_out(vcpu, size, port, count, true);
}
static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
{
int size = vcpu->arch.pio.size;
unsigned count = vcpu->arch.pio.count;
memcpy(val, vcpu->arch.pio_data, size * count);
trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
vcpu->arch.pio.count = 0;
}
static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
unsigned short port, void *val, unsigned int count)
{
int ret;
if (vcpu->arch.pio.count) {
/* Complete previous iteration. */
} else {
int r = __emulator_pio_in(vcpu, size, port, count);
if (!r)
return r;
if (vcpu->arch.pio.count)
goto data_avail;
memset(vcpu->arch.pio_data, 0, size * count);
ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
if (ret) {
data_avail:
memcpy(val, vcpu->arch.pio_data, size * count);
trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data);
vcpu->arch.pio.count = 0;
return 1;
/* Results already available, fall through. */
}
return 0;
WARN_ON(count != vcpu->arch.pio.count);
complete_emulator_pio_in(vcpu, val);
return 1;
}
static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
@ -6963,9 +6974,15 @@ static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
unsigned short port, const void *val,
unsigned int count)
{
int ret;
memcpy(vcpu->arch.pio_data, val, size * count);
trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data);
return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
ret = emulator_pio_in_out(vcpu, size, port, count, false);
if (ret)
vcpu->arch.pio.count = 0;
return ret;
}
static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
@ -9643,14 +9660,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
break;
if (unlikely(kvm_vcpu_exit_request(vcpu))) {
if (vcpu->arch.apicv_active)
static_call(kvm_x86_sync_pir_to_irr)(vcpu);
if (unlikely(kvm_vcpu_exit_request(vcpu))) {
exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
break;
}
if (vcpu->arch.apicv_active)
static_call(kvm_x86_sync_pir_to_irr)(vcpu);
}
}
/*
* Do this here before restoring debug registers on the host. And
@ -12368,44 +12385,81 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
}
EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
{
memcpy(vcpu->arch.guest_ins_data, vcpu->arch.pio_data,
vcpu->arch.pio.count * vcpu->arch.pio.size);
vcpu->arch.pio.count = 0;
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port);
static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
{
int size = vcpu->arch.pio.size;
int port = vcpu->arch.pio.port;
vcpu->arch.pio.count = 0;
if (vcpu->arch.sev_pio_count)
return kvm_sev_es_outs(vcpu, size, port);
return 1;
}
static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port, void *data, unsigned int count)
unsigned int port)
{
int ret;
for (;;) {
unsigned int count =
min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
ret = emulator_pio_out_emulated(vcpu->arch.emulate_ctxt, size, port,
data, count);
if (ret)
return ret;
/* memcpy done already by emulator_pio_out. */
vcpu->arch.sev_pio_count -= count;
vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
if (!ret)
break;
vcpu->arch.pio.count = 0;
/* Emulation done by the kernel. */
if (!vcpu->arch.sev_pio_count)
return 1;
}
vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
return 0;
}
static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port, void *data, unsigned int count)
{
int ret;
unsigned int port);
ret = emulator_pio_in_emulated(vcpu->arch.emulate_ctxt, size, port,
data, count);
if (ret) {
vcpu->arch.pio.count = 0;
} else {
vcpu->arch.guest_ins_data = data;
vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
static void advance_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
{
unsigned count = vcpu->arch.pio.count;
complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
vcpu->arch.sev_pio_count -= count;
vcpu->arch.sev_pio_data += count * vcpu->arch.pio.size;
}
static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
{
int size = vcpu->arch.pio.size;
int port = vcpu->arch.pio.port;
advance_sev_es_emulated_ins(vcpu);
if (vcpu->arch.sev_pio_count)
return kvm_sev_es_ins(vcpu, size, port);
return 1;
}
static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port)
{
for (;;) {
unsigned int count =
min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
if (!__emulator_pio_in(vcpu, size, port, count))
break;
/* Emulation done by the kernel. */
advance_sev_es_emulated_ins(vcpu);
if (!vcpu->arch.sev_pio_count)
return 1;
}
vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
return 0;
}
@ -12413,8 +12467,10 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
unsigned int port, void *data, unsigned int count,
int in)
{
return in ? kvm_sev_es_ins(vcpu, size, port, data, count)
: kvm_sev_es_outs(vcpu, size, port, data, count);
vcpu->arch.sev_pio_data = data;
vcpu->arch.sev_pio_count = count;
return in ? kvm_sev_es_ins(vcpu, size, port)
: kvm_sev_es_outs(vcpu, size, port);
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
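
The rewritten SEV-ES string I/O pumps everything through the one-page pio_data bounce buffer, so each round handles at most PAGE_SIZE / size elements; when a round must bounce through userspace, the complete_userspace_io callback resumes from the saved sev_pio_data/sev_pio_count cursor. A hedged sketch of the chunking core (do_one_round() is illustrative, not a KVM helper):

    while (remaining) {
            unsigned int n = min_t(unsigned int,
                                   PAGE_SIZE / size, remaining);

            if (!do_one_round(buf, size, port, n))
                    break;     /* 0: userspace must finish this round */

            /* round completed in the kernel: advance the cursor */
            remaining -= n;
            buf += n * size;
    }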


@ -1897,10 +1897,11 @@ void blk_cgroup_bio_start(struct bio *bio)
{
int rwd = blk_cgroup_io_type(bio), cpu;
struct blkg_iostat_set *bis;
unsigned long flags;
cpu = get_cpu();
bis = per_cpu_ptr(bio->bi_blkg->iostat_cpu, cpu);
u64_stats_update_begin(&bis->sync);
flags = u64_stats_update_begin_irqsave(&bis->sync);
/*
* If the bio is flagged with BIO_CGROUP_ACCT it means this is a split
@ -1912,7 +1913,7 @@ void blk_cgroup_bio_start(struct bio *bio)
}
bis->cur.ios[rwd]++;
u64_stats_update_end(&bis->sync);
u64_stats_update_end_irqrestore(&bis->sync, flags);
if (cgroup_subsys_on_dfl(io_cgrp_subsys))
cgroup_rstat_updated(bio->bi_blkg->blkcg->css.cgroup, cpu);
put_cpu();
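
The switch to the _irqsave variants is needed because these per-cpu u64_stats sequence counters are now written from both task and IRQ context; on 32-bit SMP a writer interrupted by another writer on the same CPU would deadlock the seqcount. Writer-side shape, as a sketch:

    unsigned long flags;

    flags = u64_stats_update_begin_irqsave(&bis->sync);
    bis->cur.ios[rwd]++;   /* 64-bit counters stay torn-read safe */
    u64_stats_update_end_irqrestore(&bis->sync, flags);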


@ -423,6 +423,7 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
device_del(pdev);
out_put:
put_device(pdev);
return ERR_PTR(err);
out_put_disk:
put_disk(disk);
return ERR_PTR(err);
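
The bug was pure unwind ordering: after put_device() the partition's cleanup is complete, and falling through into put_disk(), which belongs to an earlier failure point, dropped an extra disk reference. The general shape of a goto ladder, for reference:

    /* Each label undoes only what was set up before its failure
     * point; a path whose unwind is finished must return rather
     * than fall through into the next label. */
    out_del:
            device_del(pdev);
    out_put:
            put_device(pdev);
            return ERR_PTR(err);    /* do not fall through */
    out_put_disk:
            put_disk(disk);
            return ERR_PTR(err);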


@ -1035,13 +1035,8 @@ void acpi_turn_off_unused_power_resources(void)
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
mutex_lock(&resource->resource_lock);
/*
* Turn off power resources in an unknown state too, because the
* platform firmware on some system expects the OS to turn off
* power resources without any users unconditionally.
*/
if (!resource->ref_count &&
resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
resource->state == ACPI_POWER_RESOURCE_STATE_ON) {
acpi_handle_debug(resource->device.handle, "Turning OFF\n");
__acpi_power_off(resource);
}


@ -3896,8 +3896,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
default:
dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
return 1;
dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
return -EINVAL;
}
hpriv->hp_flags = hp_flags;


@ -281,14 +281,14 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
if (!blk)
return -ENOMEM;
rbnode->block = blk;
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
present = krealloc(rbnode->cache_present,
BITS_TO_LONGS(blklen) * sizeof(*present),
GFP_KERNEL);
if (!present) {
kfree(blk);
if (!present)
return -ENOMEM;
}
memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
(BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
@ -305,7 +305,6 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
}
/* update the rbnode block, its size and the base register */
rbnode->block = blk;
rbnode->blklen = blklen;
rbnode->base_reg = base_reg;
rbnode->cache_present = present;
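
Two ownership rules meet in this fix: rbnode->block now points at the krealloc'd blk as soon as it exists, so the error path must no longer kfree(blk); the node owns it. The companion idiom, assigning krealloc() through a temporary so a failed grow leaves the old buffer intact, looks like this (node->bitmap standing in for the real field):

    unsigned long *tmp;

    tmp = krealloc(node->bitmap, new_bytes, GFP_KERNEL);
    if (!tmp)
            return -ENOMEM;    /* node->bitmap is still valid */
    node->bitmap = tmp;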


@ -13,6 +13,7 @@
#define _HYPERV_VMBUS_H
#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>


@ -706,8 +706,9 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
/* Construct the family header first */
header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
LS_DEVICE_NAME_MAX);
strscpy_pad(header->device_name,
dev_name(&query->port->agent->device->dev),
LS_DEVICE_NAME_MAX);
header->port_num = query->port->port_num;
if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&


@ -878,6 +878,7 @@ void sc_disable(struct send_context *sc)
{
u64 reg;
struct pio_buf *pbuf;
LIST_HEAD(wake_list);
if (!sc)
return;
@ -912,19 +913,21 @@ void sc_disable(struct send_context *sc)
spin_unlock(&sc->release_lock);
write_seqlock(&sc->waitlock);
while (!list_empty(&sc->piowait)) {
if (!list_empty(&sc->piowait))
list_move(&sc->piowait, &wake_list);
write_sequnlock(&sc->waitlock);
while (!list_empty(&wake_list)) {
struct iowait *wait;
struct rvt_qp *qp;
struct hfi1_qp_priv *priv;
wait = list_first_entry(&sc->piowait, struct iowait, list);
wait = list_first_entry(&wake_list, struct iowait, list);
qp = iowait_to_qp(wait);
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
}
write_sequnlock(&sc->waitlock);
spin_unlock_irq(&sc->alloc_lock);
}
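
The rework drains sc->piowait in two phases: detach every waiter while holding the seqlock, then run the wakeups, which take QP locks of their own, against a private list with the seqlock already dropped. That breaks the lock-ordering cycle. One way to express the detach step is list_splice_init(); a sketch with a hypothetical wake_one():

    LIST_HEAD(wake_list);

    write_seqlock(&sc->waitlock);
    list_splice_init(&sc->piowait, &wake_list);  /* O(1), empties piowait */
    write_sequnlock(&sc->waitlock);

    while (!list_empty(&wake_list)) {
            struct iowait *w = list_first_entry(&wake_list,
                                                struct iowait, list);

            list_del_init(&w->list);
            wake_one(w);   /* free to take qp locks here */
    }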


@ -1092,12 +1092,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
if (cq->avoid_mem_cflct) {
ext_cqe = (__le64 *)((u8 *)cqe + 32);
get_64bit_val(ext_cqe, 24, &qword7);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
} else {
peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
ext_cqe = cq->cq_base[peek_head].buf;
get_64bit_val(ext_cqe, 24, &qword7);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
if (!peek_head)
polarity ^= 1;
}


@ -3399,9 +3399,13 @@ static void irdma_process_cqe(struct ib_wc *entry,
}
if (cq_poll_info->ud_vlan_valid) {
entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
entry->wc_flags |= IB_WC_WITH_VLAN;
u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
if (vlan) {
entry->vlan_id = vlan;
entry->wc_flags |= IB_WC_WITH_VLAN;
}
} else {
entry->sl = 0;
}


@ -330,8 +330,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
tc_node->enable = true;
ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
if (ret)
if (ret) {
vsi->unregister_qset(vsi, tc_node);
goto reg_err;
}
}
ibdev_dbg(to_ibdev(vsi->dev),
"WS: Using node %d which represents VSI %d TC %d\n",
@ -350,6 +352,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
}
goto exit;
reg_err:
irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
list_del(&tc_node->siblings);
irdma_free_node(vsi, tc_node);
leaf_add_err:
if (list_empty(&vsi_node->child_list_head)) {
if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
@ -369,11 +375,6 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
exit:
mutex_unlock(&vsi->dev->ws_mutex);
return ret;
reg_err:
mutex_unlock(&vsi->dev->ws_mutex);
irdma_ws_remove(vsi, user_pri);
return ret;
}
/**


@ -1342,7 +1342,6 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
goto err_2;
}
mr->mmkey.type = MLX5_MKEY_MR;
mr->desc_size = sizeof(struct mlx5_mtt);
mr->umem = umem;
set_mr_fields(dev, mr, umem->length, access_flags, iova);
kvfree(in);
@ -1536,6 +1535,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
ib_umem_release(&odp->umem);
return ERR_CAST(mr);
}
xa_init(&mr->implicit_children);
odp->private = mr;
err = mlx5r_store_odp_mkey(dev, &mr->mmkey);


@ -4458,6 +4458,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
MLX5_ST_SZ_BYTES(create_dct_in), out,


@ -455,6 +455,7 @@ struct qedr_qp {
/* synchronization objects used with iwarp ep */
struct kref refcnt;
struct completion iwarp_cm_comp;
struct completion qp_rel_comp;
unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
};


@ -83,7 +83,7 @@ static void qedr_iw_free_qp(struct kref *ref)
{
struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
kfree(qp);
complete(&qp->qp_rel_comp);
}
static void


@ -1357,6 +1357,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
kref_init(&qp->refcnt);
init_completion(&qp->iwarp_cm_comp);
init_completion(&qp->qp_rel_comp);
}
qp->pd = pd;
@ -2857,8 +2858,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
qedr_free_qp_resources(dev, qp, udata);
if (rdma_protocol_iwarp(&dev->ibdev, 1))
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
qedr_iw_qp_rem_ref(&qp->ibqp);
wait_for_completion(&qp->qp_rel_comp);
}
return 0;
}


@ -602,7 +602,7 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
/*
* How many pages in this iovec element?
*/
static int qib_user_sdma_num_pages(const struct iovec *iov)
static size_t qib_user_sdma_num_pages(const struct iovec *iov)
{
const unsigned long addr = (unsigned long) iov->iov_base;
const unsigned long len = iov->iov_len;
@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
struct qib_user_sdma_queue *pq,
struct qib_user_sdma_pkt *pkt,
unsigned long addr, int tlen, int npages)
unsigned long addr, int tlen, size_t npages)
{
struct page *pages[8];
int i, j;
@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
unsigned long idx;
for (idx = 0; idx < niov; idx++) {
const int npages = qib_user_sdma_num_pages(iov + idx);
const size_t npages = qib_user_sdma_num_pages(iov + idx);
const unsigned long addr = (unsigned long) iov[idx].iov_base;
ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
unsigned pktnw;
unsigned pktnwc;
int nfrags = 0;
int npages = 0;
int bytes_togo = 0;
size_t npages = 0;
size_t bytes_togo = 0;
int tiddma = 0;
int cfur;
@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
npages += qib_user_sdma_num_pages(&iov[idx]);
bytes_togo += slen;
if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
bytes_togo > type_max(typeof(pkt->bytes_togo))) {
ret = -EINVAL;
goto free_pbc;
}
pktnwc += slen >> 2;
idx++;
nfrags++;
@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
}
if (frag_size) {
int tidsmsize, n;
size_t pktsize;
size_t tidsmsize, n, pktsize, sz, addrlimit;
n = npages*((2*PAGE_SIZE/frag_size)+1);
pktsize = struct_size(pkt, addr, n);
@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
else
tidsmsize = 0;
pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
if (check_add_overflow(pktsize, tidsmsize, &sz)) {
ret = -EINVAL;
goto free_pbc;
}
pkt = kmalloc(sz, GFP_KERNEL);
if (!pkt) {
ret = -ENOMEM;
goto free_pbc;
}
pkt->largepkt = 1;
pkt->frag_size = frag_size;
pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
&addrlimit) ||
addrlimit > type_max(typeof(pkt->addrlimit))) {
ret = -EINVAL;
goto free_pbc;
}
pkt->addrlimit = addrlimit;
if (tiddma) {
char *tidsm = (char *)pkt + pktsize;
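
check_add_overflow() (linux/overflow.h) stores a + b into the destination and returns true on wrap-around; pairing it with type_max() additionally rejects sums that fit the local variable but not the narrower struct field they are later stored in. The recurring shape in this hunk, with pkt->field standing in for the real destination:

    size_t sum;

    if (check_add_overflow(a, b, &sum) ||
        sum > type_max(typeof(pkt->field)))
            return -EINVAL;
    pkt->field = sum;    /* guaranteed to fit the narrower type */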


@ -1223,7 +1223,7 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
spin_lock(&rdi->n_qps_lock);
if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
spin_unlock(&rdi->n_qps_lock);
ret = ENOMEM;
ret = -ENOMEM;
goto bail_ip;
}


@ -137,7 +137,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "uc",
.cmd = HNAE3_DBG_CMD_MAC_UC,
.dentry = HNS3_DBG_DENTRY_MAC,
.buf_len = HNS3_DBG_READ_LEN,
.buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@ -256,7 +256,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tqp",
.cmd = HNAE3_DBG_CMD_REG_TQP,
.dentry = HNS3_DBG_DENTRY_REG,
.buf_len = HNS3_DBG_READ_LEN,
.buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@ -298,7 +298,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "fd_tcam",
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dentry = HNS3_DBG_DENTRY_FD,
.buf_len = HNS3_DBG_READ_LEN,
.buf_len = HNS3_DBG_READ_LEN_1MB,
.init = hns3_dbg_common_file_init,
},
{
@ -584,7 +584,7 @@ static const struct hns3_dbg_item rx_queue_info_items[] = {
{ "TAIL", 2 },
{ "HEAD", 2 },
{ "FBDNUM", 2 },
{ "PKTNUM", 2 },
{ "PKTNUM", 5 },
{ "COPYBREAK", 2 },
{ "RING_EN", 2 },
{ "RX_RING_EN", 2 },
@ -687,7 +687,7 @@ static const struct hns3_dbg_item tx_queue_info_items[] = {
{ "HEAD", 2 },
{ "FBDNUM", 2 },
{ "OFFSET", 2 },
{ "PKTNUM", 2 },
{ "PKTNUM", 5 },
{ "RING_EN", 2 },
{ "TX_RING_EN", 2 },
{ "BASE_ADDR", 10 },
@ -912,13 +912,13 @@ static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
}
static const struct hns3_dbg_item tx_bd_info_items[] = {
{ "BD_IDX", 5 },
{ "ADDRESS", 2 },
{ "BD_IDX", 2 },
{ "ADDRESS", 13 },
{ "VLAN_TAG", 2 },
{ "SIZE", 2 },
{ "T_CS_VLAN_TSO", 2 },
{ "OT_VLAN_TAG", 3 },
{ "TV", 2 },
{ "TV", 5 },
{ "OLT_VLAN_LEN", 2 },
{ "PAYLEN_OL4CS", 2 },
{ "BD_FE_SC_VLD", 2 },


@ -391,7 +391,7 @@ static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
struct hclge_dbg_bitmap_cmd *bitmap;
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u16 qset_id, qset_num;
int ret;
@ -408,12 +408,12 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%04u %#x %#x %#x %#x\n",
qset_id, bitmap->bit0, bitmap->bit1,
bitmap->bit2, bitmap->bit3);
qset_id, req.bit0, req.bit1, req.bit2,
req.bit3);
}
return 0;
@ -422,7 +422,7 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
struct hclge_dbg_bitmap_cmd *bitmap;
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pri_id, pri_num;
int ret;
@ -439,12 +439,11 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
pri_id, bitmap->bit0, bitmap->bit1,
bitmap->bit2);
pri_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@ -453,7 +452,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
struct hclge_dbg_bitmap_cmd *bitmap;
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pg_id;
int ret;
@ -466,12 +465,11 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
pg_id, bitmap->bit0, bitmap->bit1,
bitmap->bit2);
pg_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@ -511,7 +509,7 @@ static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
struct hclge_dbg_bitmap_cmd *bitmap;
struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 port_id = 0;
int ret;
@ -521,12 +519,12 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
bitmap->bit0);
req.bit0);
*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
bitmap->bit1);
req.bit1);
return 0;
}


@ -2930,33 +2930,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
hclge_wq, &hdev->service_task, 0);
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
hclge_wq, &hdev->service_task, 0);
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
hclge_wq, &hdev->service_task, 0);
mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
hclge_wq, &hdev->service_task,
delay_time);
mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
@ -3650,33 +3646,14 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1;
}
static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
affinity_notify);
cpumask_copy(&hdev->affinity_mask, mask);
}
static void hclge_irq_affinity_release(struct kref *ref)
{
}
static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
{
irq_set_affinity_hint(hdev->misc_vector.vector_irq,
&hdev->affinity_mask);
hdev->affinity_notify.notify = hclge_irq_affinity_notify;
hdev->affinity_notify.release = hclge_irq_affinity_release;
irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
&hdev->affinity_notify);
}
static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
{
irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
}
@ -13233,7 +13210,7 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;


@ -974,7 +974,6 @@ struct hclge_dev {
/* affinity mask and notify for misc interrupt */
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
struct hclge_ptp *ptp;
struct devlink *devlink;
};


@ -2232,6 +2232,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
&hdev->state))
mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
@ -3449,6 +3450,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_init_rxd_adv_layout(hdev);
set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
hdev->last_reset_time = jiffies;
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
HCLGEVF_DRIVER_NAME);
@ -3899,7 +3902,7 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
if (!hclgevf_wq) {
pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
return -ENOMEM;


@ -146,6 +146,7 @@ enum hclgevf_states {
HCLGEVF_STATE_REMOVING,
HCLGEVF_STATE_NIC_REGISTERED,
HCLGEVF_STATE_ROCE_REGISTERED,
HCLGEVF_STATE_SERVICE_INITED,
/* task states */
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,


@ -100,9 +100,9 @@ static void ice_display_lag_info(struct ice_lag *lag)
*/
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
struct net_device *event_netdev, *netdev_tmp;
struct netdev_notifier_bonding_info *info;
struct netdev_bonding_info *bonding_info;
struct net_device *event_netdev;
const char *lag_netdev_name;
event_netdev = netdev_notifier_info_to_dev(ptr);
@ -123,19 +123,6 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
goto lag_out;
}
rcu_read_lock();
for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
if (!netif_is_ice(netdev_tmp))
continue;
if (netdev_tmp && netdev_tmp != lag->netdev &&
lag->peer_netdev != netdev_tmp) {
dev_hold(netdev_tmp);
lag->peer_netdev = netdev_tmp;
}
}
rcu_read_unlock();
if (bonding_info->slave.state)
ice_lag_set_backup(lag);
else
@ -319,6 +306,9 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
case NETDEV_BONDING_INFO:
ice_lag_info_event(lag, ptr);
break;
case NETDEV_UNREGISTER:
ice_lag_unlink(lag, ptr);
break;
default:
break;
}


@ -1929,6 +1929,9 @@ void ice_ptp_init(struct ice_pf *pf)
*/
void ice_ptp_release(struct ice_pf *pf)
{
if (!test_bit(ICE_FLAG_PTP, pf->flags))
return;
/* Disable timestamping for both Tx and Rx */
ice_ptp_cfg_timestamp(pf, false);


@ -316,18 +316,85 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
static void get_lf_str_list(struct rvu_block block, int pcifunc,
char *lfs)
{
int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
if (lf >= block.lf.max)
break;
if (block.fn_map[lf] != pcifunc)
continue;
if (lf == prev_lf + 1) {
prev_lf = lf;
seq = 1;
continue;
}
if (seq)
len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
else
len += (len ? sprintf(lfs + len, ",%d", lf) :
sprintf(lfs + len, "%d", lf));
prev_lf = lf;
seq = 0;
}
if (seq)
len += sprintf(lfs + len, "-%d", prev_lf);
lfs[len] = '\0';
}
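
get_lf_str_list() run-compresses the LF numbers owned by one PF/VF: a consecutive hit extends the pending range, a break flushes it with a "-<prev>" suffix, and a trailing run is flushed after the loop. Illustrative only: with bits {0, 1, 2, 5, 7} set for this pcifunc,

    char lfs[16];

    get_lf_str_list(block, pcifunc, lfs);
    /* lfs now contains "0-2,5,7" */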
static int get_max_column_width(struct rvu *rvu)
{
int index, pf, vf, lf_str_size = 12, buf_size = 256;
struct rvu_block block;
u16 pcifunc;
char *buf;
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
pcifunc = pf << 10 | vf;
if (!pcifunc)
continue;
for (index = 0; index < BLK_COUNT; index++) {
block = rvu->hw->block[index];
if (!strlen(block.name))
continue;
get_lf_str_list(block, pcifunc, buf);
if (lf_str_size <= strlen(buf))
lf_str_size = strlen(buf) + 1;
}
}
}
kfree(buf);
return lf_str_size;
}
/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
char __user *buffer,
size_t count, loff_t *ppos)
{
int index, off = 0, flag = 0, go_back = 0, len = 0;
int index, off = 0, flag = 0, len = 0, i = 0;
struct rvu *rvu = filp->private_data;
int lf, pf, vf, pcifunc;
int bytes_not_copied = 0;
struct rvu_block block;
int bytes_not_copied;
int lf_str_size = 12;
int pf, vf, pcifunc;
int buf_size = 2048;
int lf_str_size;
char *lfs;
char *buf;
@ -339,6 +406,9 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
if (!buf)
return -ENOSPC;
/* Get the maximum width of a column */
lf_str_size = get_max_column_width(rvu);
lfs = kzalloc(lf_str_size, GFP_KERNEL);
if (!lfs) {
kfree(buf);
@ -352,65 +422,69 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
"%-*s", lf_str_size,
rvu->hw->block[index].name);
}
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
if (bytes_not_copied)
goto out;
i++;
*ppos += off;
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
off = 0;
flag = 0;
pcifunc = pf << 10 | vf;
if (!pcifunc)
continue;
if (vf) {
sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
go_back = scnprintf(&buf[off],
buf_size - 1 - off,
"%-*s", lf_str_size, lfs);
off = scnprintf(&buf[off],
buf_size - 1 - off,
"%-*s", lf_str_size, lfs);
} else {
sprintf(lfs, "PF%d", pf);
go_back = scnprintf(&buf[off],
buf_size - 1 - off,
"%-*s", lf_str_size, lfs);
off = scnprintf(&buf[off],
buf_size - 1 - off,
"%-*s", lf_str_size, lfs);
}
off += go_back;
for (index = 0; index < BLKTYPE_MAX; index++) {
for (index = 0; index < BLK_COUNT; index++) {
block = rvu->hw->block[index];
if (!strlen(block.name))
continue;
len = 0;
lfs[len] = '\0';
for (lf = 0; lf < block.lf.max; lf++) {
if (block.fn_map[lf] != pcifunc)
continue;
get_lf_str_list(block, pcifunc, lfs);
if (strlen(lfs))
flag = 1;
len += sprintf(&lfs[len], "%d,", lf);
}
if (flag)
len--;
lfs[len] = '\0';
off += scnprintf(&buf[off], buf_size - 1 - off,
"%-*s", lf_str_size, lfs);
if (!strlen(lfs))
go_back += lf_str_size;
}
if (!flag)
off -= go_back;
else
flag = 0;
off--;
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
if (flag) {
off += scnprintf(&buf[off],
buf_size - 1 - off, "\n");
bytes_not_copied = copy_to_user(buffer +
(i * off),
buf, off);
if (bytes_not_copied)
goto out;
i++;
*ppos += off;
}
}
}
bytes_not_copied = copy_to_user(buffer, buf, off);
out:
kfree(lfs);
kfree(buf);
if (bytes_not_copied)
return -EFAULT;
*ppos = off;
return off;
return *ppos;
}
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
@ -594,7 +668,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
if (cmd_buf)
ret = -EINVAL;
if (!strncmp(subtoken, "help", 4) || ret < 0) {
if (ret < 0 || !strncmp(subtoken, "help", 4)) {
dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
goto qsize_write_done;
}
@ -1809,6 +1883,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
u16 pcifunc;
char *str;
/* Ingress policers do not exist on all platforms */
if (!nix_hw->ipolicer)
return 0;
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
if (layer == BAND_PROF_INVAL_LAYER)
continue;
@ -1858,6 +1936,10 @@ static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
int layer;
char *str;
/* Ingress policers do not exist on all platforms */
if (!nix_hw->ipolicer)
return 0;
seq_puts(m, "\nBandwidth profile resource free count\n");
seq_puts(m, "=====================================\n");
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {


@ -2583,6 +2583,9 @@ static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
return;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
if (!nix_hw)
return;
vlan = &nix_hw->txvlan;
mutex_lock(&vlan->rsrc_lock);


@ -353,13 +353,10 @@ static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
struct sk_buff *skb;
int err;
elem_info->u.rdq.skb = NULL;
skb = netdev_alloc_skb_ip_align(NULL, buf_len);
if (!skb)
return -ENOMEM;
/* Assume that wqe was previously zeroed. */
err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
buf_len, DMA_FROM_DEVICE);
if (err)
@ -597,21 +594,26 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_elem_info *elem_info;
struct mlxsw_rx_info rx_info = {};
char *wqe;
char wqe[MLXSW_PCI_WQE_SIZE];
struct sk_buff *skb;
u16 byte_count;
int err;
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
skb = elem_info->u.sdq.skb;
if (!skb)
return;
wqe = elem_info->elem;
mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
skb = elem_info->u.rdq.skb;
memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
if (err) {
dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
goto out;
}
mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
rx_info.is_lag = true;
rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
@ -647,10 +649,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
skb_put(skb, byte_count);
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
memset(wqe, 0, q->elem_size);
err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
if (err)
dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
out:
/* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);


@ -1743,6 +1743,16 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
ret = -EINVAL;
goto cleanup;
}
if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
DMA_BIT_MASK(64))) {
if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
DMA_BIT_MASK(32))) {
dev_warn(&tx->adapter->pdev->dev,
"lan743x_: No suitable DMA available\n");
ret = -ENOMEM;
goto cleanup;
}
}
ring_allocation_size = ALIGN(tx->ring_size *
sizeof(struct lan743x_tx_descriptor),
PAGE_SIZE);
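
The added block is the standard DMA-mask negotiation: prefer 64-bit addressing, fall back to 32-bit, and abort ring setup if neither is accepted. Because dma_set_mask_and_coherent() returns 0 on success, the cascade condenses to a sketch like:

    if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
        dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
            return -ENOMEM;   /* no usable DMA addressing */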
@ -1934,7 +1944,8 @@ static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
index);
}
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
gfp_t gfp)
{
struct net_device *netdev = rx->adapter->netdev;
struct device *dev = &rx->adapter->pdev->dev;
@ -1948,7 +1959,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
if (!skb)
return -ENOMEM;
dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
@ -2110,7 +2121,8 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
/* save existing skb, allocate new skb and map to dma */
skb = buffer_info->skb;
if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
if (lan743x_rx_init_ring_element(rx, rx->last_head,
GFP_ATOMIC | GFP_DMA)) {
/* failed to allocate next skb.
* Memory is very low.
* Drop this packet and reuse buffer.
@ -2276,6 +2288,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
ret = -EINVAL;
goto cleanup;
}
if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
DMA_BIT_MASK(64))) {
if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
DMA_BIT_MASK(32))) {
dev_warn(&rx->adapter->pdev->dev,
"lan743x_: No suitable DMA available\n");
ret = -ENOMEM;
goto cleanup;
}
}
ring_allocation_size = ALIGN(rx->ring_size *
sizeof(struct lan743x_rx_descriptor),
PAGE_SIZE);
@ -2315,13 +2337,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
rx->last_head = 0;
for (index = 0; index < rx->ring_size; index++) {
ret = lan743x_rx_init_ring_element(rx, index);
ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
if (ret)
goto cleanup;
}
return 0;
cleanup:
netif_warn(rx->adapter, ifup, rx->adapter->netdev,
"Error allocating memory for LAN743x\n");
lan743x_rx_ring_cleanup(rx);
return ret;
}
@ -3019,6 +3044,8 @@ static int lan743x_pm_resume(struct device *dev)
if (ret) {
netif_err(adapter, probe, adapter->netdev,
"lan743x_hardware_init returned %d\n", ret);
lan743x_pci_cleanup(adapter);
return ret;
}
/* open netdev when netdev is at running state while resume.


@ -182,15 +182,21 @@ static int
nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
unsigned int max_mtu;
struct nfp_bpf_vnic *bv;
struct bpf_prog *prog;
if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
return 0;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (new_mtu > max_mtu) {
nn_info(nn, "BPF offload active, MTU over %u not supported\n",
max_mtu);
if (nn->xdp_hw.prog) {
prog = nn->xdp_hw.prog;
} else {
bv = nn->app_priv;
prog = bv->tc_prog;
}
if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) {
nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary");
return -EBUSY;
}
return 0;


@ -560,6 +560,8 @@ bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
unsigned int mtu);
int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
int prev_insn_idx);


@ -481,19 +481,28 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
return 0;
}
bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
unsigned int mtu)
{
unsigned int fw_mtu, pkt_off;
fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
pkt_off = min(prog->aux->max_pkt_offset, mtu);
return fw_mtu < pkt_off;
}
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
unsigned int max_stack, max_prog_len;
dma_addr_t dma_addr;
void *img;
int err;
fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
if (fw_mtu < pkt_off) {
if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
return -EOPNOTSUPP;
}


@ -1015,9 +1015,6 @@ static int lpc_eth_close(struct net_device *ndev)
napi_disable(&pldat->napi);
netif_stop_queue(ndev);
if (ndev->phydev)
phy_stop(ndev->phydev);
spin_lock_irqsave(&pldat->lock, flags);
__lpc_eth_reset(pldat);
netif_carrier_off(ndev);
@ -1025,6 +1022,8 @@ static int lpc_eth_close(struct net_device *ndev)
writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags);
if (ndev->phydev)
phy_stop(ndev->phydev);
clk_disable_unprepare(pldat->clk);
return 0;


@ -156,6 +156,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x8129) },
{ PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT },
{ PCI_VDEVICE(REALTEK, 0x8161) },
{ PCI_VDEVICE(REALTEK, 0x8162) },
{ PCI_VDEVICE(REALTEK, 0x8167) },
{ PCI_VDEVICE(REALTEK, 0x8168) },
{ PCI_VDEVICE(NCUBE, 0x8168) },


@ -243,62 +243,10 @@ static void phy_sanitize_settings(struct phy_device *phydev)
}
}
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
u8 autoneg = cmd->base.autoneg;
u8 duplex = cmd->base.duplex;
u32 speed = cmd->base.speed;
if (cmd->base.phy_address != phydev->mdio.addr)
return -EINVAL;
linkmode_copy(advertising, cmd->link_modes.advertising);
/* We make sure that we don't pass unsupported values in to the PHY */
linkmode_and(advertising, advertising, phydev->supported);
/* Verify the settings we care about. */
if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
return -EINVAL;
if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
return -EINVAL;
if (autoneg == AUTONEG_DISABLE &&
((speed != SPEED_1000 &&
speed != SPEED_100 &&
speed != SPEED_10) ||
(duplex != DUPLEX_HALF &&
duplex != DUPLEX_FULL)))
return -EINVAL;
phydev->autoneg = autoneg;
if (autoneg == AUTONEG_DISABLE) {
phydev->speed = speed;
phydev->duplex = duplex;
}
linkmode_copy(phydev->advertising, advertising);
linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
phydev->advertising, autoneg == AUTONEG_ENABLE);
phydev->master_slave_set = cmd->base.master_slave_cfg;
phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
/* Restart the PHY */
phy_start_aneg(phydev);
return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd)
{
mutex_lock(&phydev->lock);
linkmode_copy(cmd->link_modes.supported, phydev->supported);
linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
@ -317,6 +265,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.autoneg = phydev->autoneg;
cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
cmd->base.eth_tp_mdix = phydev->mdix;
mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);
@ -750,6 +699,37 @@ static int phy_check_link_status(struct phy_device *phydev)
return 0;
}
/**
* _phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
*
* Description: Sanitizes the settings (if we're not autonegotiating
* them), and then calls the driver's config_aneg function.
* If the PHYCONTROL Layer is operating, we change the state to
* reflect the beginning of Auto-negotiation or forcing.
*/
static int _phy_start_aneg(struct phy_device *phydev)
{
int err;
lockdep_assert_held(&phydev->lock);
if (!phydev->drv)
return -EIO;
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
err = phy_config_aneg(phydev);
if (err < 0)
return err;
if (phy_is_started(phydev))
err = phy_check_link_status(phydev);
return err;
}
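
This is the common locked/unlocked function split: the underscore-prefixed variant documents and asserts, via lockdep, that the caller already holds phydev->lock, which lets phy_ethtool_ksettings_set() invoke it inside its own critical section, while the public phy_start_aneg() still takes the mutex itself. The pattern in miniature, with hypothetical names:

    static int _do_thing(struct obj *o)
    {
            lockdep_assert_held(&o->lock);
            /* work that requires o->lock */
            return 0;
    }

    int do_thing(struct obj *o)
    {
            int err;

            mutex_lock(&o->lock);
            err = _do_thing(o);
            mutex_unlock(&o->lock);
            return err;
    }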
/**
* phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
@ -763,21 +743,8 @@ int phy_start_aneg(struct phy_device *phydev)
{
int err;
if (!phydev->drv)
return -EIO;
mutex_lock(&phydev->lock);
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
err = phy_config_aneg(phydev);
if (err < 0)
goto out_unlock;
if (phy_is_started(phydev))
err = phy_check_link_status(phydev);
out_unlock:
err = _phy_start_aneg(phydev);
mutex_unlock(&phydev->lock);
return err;
@ -800,6 +767,61 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
return ret < 0 ? ret : 0;
}
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
u8 autoneg = cmd->base.autoneg;
u8 duplex = cmd->base.duplex;
u32 speed = cmd->base.speed;
if (cmd->base.phy_address != phydev->mdio.addr)
return -EINVAL;
linkmode_copy(advertising, cmd->link_modes.advertising);
/* We make sure that we don't pass unsupported values in to the PHY */
linkmode_and(advertising, advertising, phydev->supported);
/* Verify the settings we care about. */
if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
return -EINVAL;
if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
return -EINVAL;
if (autoneg == AUTONEG_DISABLE &&
((speed != SPEED_1000 &&
speed != SPEED_100 &&
speed != SPEED_10) ||
(duplex != DUPLEX_HALF &&
duplex != DUPLEX_FULL)))
return -EINVAL;
mutex_lock(&phydev->lock);
phydev->autoneg = autoneg;
if (autoneg == AUTONEG_DISABLE) {
phydev->speed = speed;
phydev->duplex = duplex;
}
linkmode_copy(phydev->advertising, advertising);
linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
phydev->advertising, autoneg == AUTONEG_ENABLE);
phydev->master_slave_set = cmd->base.master_slave_cfg;
phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
/* Restart the PHY */
_phy_start_aneg(phydev);
mutex_unlock(&phydev->lock);
return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);
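The refactor above is worth spelling out: the old phy_start_aneg() body moves into a lockless _phy_start_aneg() that documents its contract with lockdep_assert_held(&phydev->lock), while phy_start_aneg() and phy_ethtool_ksettings_set() become locking wrappers around it. That lets ksettings_set update autoneg/speed/duplex and restart aneg inside one critical section instead of dropping the lock between the two steps. A minimal userspace sketch of the same pattern, using pthreads in place of the kernel mutex (all names here are illustrative):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int dev_autoneg;

    /* lockless helper: caller must hold 'lock' (the kernel version
     * asserts this with lockdep_assert_held()) */
    static int __start_aneg(int autoneg)
    {
        dev_autoneg = autoneg;
        return 0;
    }

    /* public wrapper: takes the lock, then calls the helper */
    int start_aneg(int autoneg)
    {
        pthread_mutex_lock(&lock);
        int err = __start_aneg(autoneg);
        pthread_mutex_unlock(&lock);
        return err;
    }

A settings-update path can now take the lock once, update several fields, and call the helper before unlocking, which is exactly what phy_ethtool_ksettings_set() does with _phy_start_aneg().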
/**
* phy_speed_down - set speed to lowest speed supported by both link partners
* @phydev: the phy_device struct

View File

@ -4122,6 +4122,12 @@ static int lan78xx_probe(struct usb_interface *intf,
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
/* Reject broken descriptors. */
if (dev->maxpacket == 0) {
ret = -ENODEV;
goto out4;
}
/* driver requires remote-wakeup capability during autosuspend. */
intf->needs_remote_wakeup = 1;

View File

@ -1792,6 +1792,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
if (dev->maxpacket == 0) {
/* that is a broken device */
status = -ENODEV;
goto out4;
}
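Both USB hunks above reject a zero usb_maxpacket() value coming from a broken descriptor. The motivation, hedged from the surrounding code: maxpacket is later used as a divisor, e.g. when deciding whether a transfer length is packet-aligned and needs a zero-length packet, so a zero value would crash in the transmit path. A small illustration with made-up names:

    #include <stdio.h>

    /* needs_zlp() is illustrative, not a driver function */
    static int needs_zlp(unsigned int len, unsigned int maxpacket)
    {
        if (maxpacket == 0)
            return -1;                  /* reject up front, as the fixes do */
        return (len % maxpacket) == 0;  /* safe only after the check */
    }

    int main(void)
    {
        printf("%d\n", needs_zlp(512, 0));   /* -1 instead of a crash */
        printf("%d\n", needs_zlp(512, 512)); /* 1: packet-aligned */
        return 0;
    }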

View File

@ -3833,7 +3833,6 @@ vmxnet3_suspend(struct device *device)
vmxnet3_free_intr_resources(adapter);
netif_device_detach(netdev);
netif_tx_stop_all_queues(netdev);
/* Create wake-up filters. */
pmConf = adapter->pm_conf;

View File

@ -1730,6 +1730,10 @@ static int netfront_resume(struct xenbus_device *dev)
dev_dbg(&dev->dev, "%s\n", dev->nodename);
netif_tx_lock_bh(info->netdev);
netif_device_detach(info->netdev);
netif_tx_unlock_bh(info->netdev);
xennet_disconnect_backend(info);
return 0;
}
@ -2351,6 +2355,10 @@ static int xennet_connect(struct net_device *dev)
* domain a kick because we've probably just requeued some
* packets.
*/
netif_tx_lock_bh(np->netdev);
netif_device_attach(np->netdev);
netif_tx_unlock_bh(np->netdev);
netif_carrier_on(np->netdev);
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];

View File

@ -1006,11 +1006,11 @@ static u64 port100_get_command_type_mask(struct port100 *dev)
skb = port100_alloc_skb(dev, 0);
if (!skb)
return -ENOMEM;
return 0;
resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb);
if (IS_ERR(resp))
return PTR_ERR(resp);
return 0;
if (resp->len < 8)
mask = 0;
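The port100 fix replaces the error returns with "return 0" because port100_get_command_type_mask() returns u64: a negative errno converted to u64 becomes an enormous value with almost every bit set, so callers treating the result as a command-type mask would see every command as supported. A runnable demonstration of the conversion:

    #include <stdio.h>
    #include <stdint.h>

    /* returning -ENOMEM (-12) through a u64 return type */
    static uint64_t get_mask_buggy(void)
    {
        return (uint64_t)-12;  /* becomes 0xfffffffffffffff4 */
    }

    int main(void)
    {
        printf("mask = 0x%llx\n",
               (unsigned long long)get_mask_buggy());
        return 0;
    }

Returning 0 instead lets callers interpret the result as "no commands supported", which is a valid mask value.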

View File

@ -5,7 +5,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@ -13,7 +12,6 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#define FLAG_BCM4708 BIT(1)
@ -24,8 +22,7 @@ struct ns_pinctrl {
struct device *dev;
unsigned int chipset_flag;
struct pinctrl_dev *pctldev;
struct regmap *regmap;
u32 offset;
void __iomem *base;
struct pinctrl_desc pctldesc;
struct ns_pinctrl_group *groups;
@ -232,9 +229,9 @@ static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
unset |= BIT(pin_number);
}
regmap_read(ns_pinctrl->regmap, ns_pinctrl->offset, &tmp);
tmp = readl(ns_pinctrl->base);
tmp &= ~unset;
regmap_write(ns_pinctrl->regmap, ns_pinctrl->offset, tmp);
writel(tmp, ns_pinctrl->base);
return 0;
}
@ -266,13 +263,13 @@ static const struct of_device_id ns_pinctrl_of_match_table[] = {
static int ns_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct of_device_id *of_id;
struct ns_pinctrl *ns_pinctrl;
struct pinctrl_desc *pctldesc;
struct pinctrl_pin_desc *pin;
struct ns_pinctrl_group *group;
struct ns_pinctrl_function *function;
struct resource *res;
int i;
ns_pinctrl = devm_kzalloc(dev, sizeof(*ns_pinctrl), GFP_KERNEL);
@ -290,18 +287,12 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
ns_pinctrl->chipset_flag = (uintptr_t)of_id->data;
ns_pinctrl->regmap = syscon_node_to_regmap(of_get_parent(np));
if (IS_ERR(ns_pinctrl->regmap)) {
int err = PTR_ERR(ns_pinctrl->regmap);
dev_err(dev, "Failed to map pinctrl regs: %d\n", err);
return err;
}
if (of_property_read_u32(np, "offset", &ns_pinctrl->offset)) {
dev_err(dev, "Failed to get register offset\n");
return -ENOENT;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"cru_gpio_control");
ns_pinctrl->base = devm_ioremap_resource(dev, res);
if (IS_ERR(ns_pinctrl->base)) {
dev_err(dev, "Failed to map pinctrl regs\n");
return PTR_ERR(ns_pinctrl->base);
}
memcpy(pctldesc, &ns_pinctrl_desc, sizeof(*pctldesc));

View File

@ -840,6 +840,34 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
{
struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
unsigned long flags;
u32 pin_reg, mask;
int i;
mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
BIT(WAKE_CNTRL_OFF_S4);
for (i = 0; i < desc->npins; i++) {
int pin = desc->pins[i].number;
const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
if (!pd)
continue;
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + i * 4);
pin_reg &= ~mask;
writel(pin_reg, gpio_dev->base + i * 4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
}
#ifdef CONFIG_PM_SLEEP
static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
{
@ -976,6 +1004,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gpio_dev->pctrl);
}
/* Disable and mask interrupts */
amd_gpio_irq_init(gpio_dev);
girq = &gpio_dev->gc.irq;
girq->chip = &amd_gpio_irqchip;
/* This will let us handle the parent IRQ in the driver */

View File

@ -1644,8 +1644,8 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
struct stm32_pinctrl_group *g = pctl->groups;
int i;
for (i = g->pin; i < g->pin + pctl->ngroups; i++)
stm32_pinctrl_restore_gpio_regs(pctl, i);
for (i = 0; i < pctl->ngroups; i++, g++)
stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
return 0;
}

View File

@ -147,8 +147,8 @@ config RESET_OXNAS
bool
config RESET_PISTACHIO
bool "Pistachio Reset Driver" if COMPILE_TEST
default MACH_PISTACHIO
bool "Pistachio Reset Driver"
depends on MIPS || COMPILE_TEST
help
This enables the reset driver for ImgTec Pistachio SoCs.

View File

@ -38,7 +38,7 @@ static int brcm_rescal_reset_set(struct reset_controller_dev *rcdev,
}
ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg,
!(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
if (ret) {
dev_err(data->dev, "time out on SATA/PCIe rescal\n");
return ret;
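For context on the one-character change: readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from linux/iopoll.h keeps reading *addr into val until cond evaluates true or the timeout elapses, returning 0 on success and -ETIMEDOUT otherwise. Dropping the "!" therefore flips the completion condition from "status bit cleared" to "status bit set". A hedged, hand-rolled sketch of the macro's behavior:

    #include <stdint.h>
    #include <errno.h>
    #include <unistd.h>

    /* simplified model of readl_poll_timeout(); not the kernel macro */
    static int poll_bit_set(const volatile uint32_t *reg, uint32_t bit,
                            unsigned int sleep_us, unsigned int timeout_us)
    {
        unsigned int waited = 0;

        for (;;) {
            if (*reg & bit)          /* the (now un-negated) condition */
                return 0;
            if (waited >= timeout_us)
                return -ETIMEDOUT;
            usleep(sleep_us);
            waited += sleep_us;
        }
    }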

View File

@ -92,3 +92,29 @@ void __init socfpga_reset_init(void)
for_each_matching_node(np, socfpga_early_reset_dt_ids)
a10_reset_init(np);
}
/*
* The early driver is problematic, because it doesn't register
* itself as a driver. This causes certain device links to prevent
* consumer devices from probing. The hacky solution is to register
* an empty driver, whose only job is to attach itself to the reset
* manager and call probe.
*/
static const struct of_device_id socfpga_reset_dt_ids[] = {
{ .compatible = "altr,rst-mgr", },
{ /* sentinel */ },
};
static int reset_simple_probe(struct platform_device *pdev)
{
return 0;
}
static struct platform_driver reset_socfpga_driver = {
.probe = reset_simple_probe,
.driver = {
.name = "socfpga-reset",
.of_match_table = socfpga_reset_dt_ids,
},
};
builtin_platform_driver(reset_socfpga_driver);

View File

@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
struct mrq_reset_request request;
struct tegra_bpmp_message msg;
int err;
memset(&request, 0, sizeof(request));
request.cmd = command;
@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
msg.tx.data = &request;
msg.tx.size = sizeof(request);
return tegra_bpmp_transfer(bpmp, &msg);
err = tegra_bpmp_transfer(bpmp, &msg);
if (err)
return err;
if (msg.rx.ret)
return -EINVAL;
return 0;
}
static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,

View File

@ -220,7 +220,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
/* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
shost->can_queue);
error = scsi_init_sense_cache(shost);
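The new comment deserves a worked example. min_t(type, a, b) casts both operands to type before comparing, so with the old min_t(short, ...) a can_queue above SHRT_MAX wrapped around before the comparison and could become the "minimum". Assuming the usual 16-bit two's-complement short:

    #include <stdio.h>

    int main(void)
    {
        int can_queue = 65536;   /* > SHRT_MAX */
        short cmd_per_lun = 32;

        short s = (short)can_queue;  /* wraps to 0 */
        short buggy = cmd_per_lun < s ? cmd_per_lun : s;                 /* 0 */
        int   fixed = cmd_per_lun < can_queue ? cmd_per_lun : can_queue; /* 32 */

        printf("min_t(short): %d, min_t(int): %d\n", buggy, fixed);
        return 0;
    }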

View File

@ -3736,7 +3736,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_lun = -1;
shost->unique_id = mrioc->id;
shost->max_channel = 1;
shost->max_channel = 0;
shost->max_id = 0xFFFFFFFF;
if (prot_mask >= 0)

View File

@ -431,7 +431,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
goto done_free_fcport;
done_free_fcport:
if (bsg_request->msgcode == FC_BSG_RPT_ELS)
if (bsg_request->msgcode != FC_BSG_RPT_ELS)
qla2x00_free_fcport(fcport);
done:
return rval;

View File

@ -4157,7 +4157,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ql_dbg_pci(ql_dbg_init, ha->pdev,
0xe0ee, "%s: failed alloc dsd\n",
__func__);
return 1;
return -ENOMEM;
}
ha->dif_bundle_kallocs++;

View File

@ -3319,8 +3319,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
cmd->reset_count, qpair->chip_reset);
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return 0;
goto out_unmap_unlock;
}
/* Does F/W have an IOCBs for this request */
@ -3445,10 +3444,6 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
prm.sg = NULL;
prm.req_cnt = 1;
/* Calculate number of entries and segments required */
if (qlt_pci_map_calc_cnt(&prm) != 0)
return -EAGAIN;
if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
/*
@ -3466,6 +3461,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
return 0;
}
/* Calculate number of entries and segments required */
if (qlt_pci_map_calc_cnt(&prm) != 0)
return -EAGAIN;
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
@ -3870,9 +3869,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
BUG_ON(cmd->cmd_in_wq);
if (cmd->sg_mapped)
qlt_unmap_sg(cmd->vha, cmd);
if (!cmd->q_full)
qlt_decr_num_pend_cmds(cmd->vha);

View File

@ -553,8 +553,10 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
module_put(sdev->host->hostt->module);
struct module *mod = sdev->host->hostt->module;
put_device(&sdev->sdev_gendev);
module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
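This hunk and the scsi_sysfs.c hunk below reorder the teardown so module_put() runs only after the final reference to the scsi_device is dropped: the put may free sdev, so the module pointer has to be read through sdev first and released last, keeping the LLD module alive while release code that lives in it can still run. A generic sketch of the ordering, with illustrative types rather than the SCSI structures:

    #include <stdlib.h>

    struct module { int refcount; };
    struct object { struct module *owner; };

    static void object_put(struct object *obj)
    {
        struct module *mod = obj->owner; /* cache before the final put */
        free(obj);                       /* stand-in for put_device() */
        mod->refcount--;                 /* stand-in for module_put() */
    }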

View File

@ -449,9 +449,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
unsigned long flags;
struct module *mod;
sdev = container_of(work, struct scsi_device, ew.work);
mod = sdev->host->hostt->module;
scsi_dh_release_device(sdev);
parent = sdev->sdev_gendev.parent;
@ -502,11 +505,17 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
if (parent)
put_device(parent);
module_put(mod);
}
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
/* Set module pointer as NULL in case of module unloading */
if (!try_module_get(sdp->host->hostt->module))
sdp->host->hostt->module = NULL;
execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}

View File

@ -2930,8 +2930,6 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
err = transport->set_param(conn, ev->u.set_param.param,
data, ev->u.set_param.len);
if ((conn->state == ISCSI_CONN_BOUND) ||
(conn->state == ISCSI_CONN_UP)) {
err = transport->set_param(conn, ev->u.set_param.param,

View File

@ -3683,7 +3683,12 @@ static int sd_resume(struct device *dev)
static int sd_resume_runtime(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
struct scsi_device *sdp = sdkp->device;
struct scsi_device *sdp;
if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
return 0;
sdp = sdkp->device;
if (sdp->ignore_media_change) {
/* clear the device's sense data */

View File

@ -1285,11 +1285,15 @@ static void storvsc_on_channel_callback(void *context)
foreach_vmbus_pkt(desc, channel) {
struct vstor_packet *packet = hv_pkt_data(desc);
struct storvsc_cmd_request *request = NULL;
u32 pktlen = hv_pkt_datalen(desc);
u64 rqst_id = desc->trans_id;
u32 minlen = rqst_id ? sizeof(struct vstor_packet) -
stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation);
if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) -
stor_device->vmscsi_size_delta) {
dev_err(&device->device, "Invalid packet len\n");
if (pktlen < minlen) {
dev_err(&device->device,
"Invalid pkt: id=%llu, len=%u, minlen=%u\n",
rqst_id, pktlen, minlen);
continue;
}
@ -1302,13 +1306,23 @@ static void storvsc_on_channel_callback(void *context)
if (rqst_id == 0) {
/*
* storvsc_on_receive() looks at the vstor_packet in the message
* from the ring buffer. If the operation in the vstor_packet is
* COMPLETE_IO, then we call storvsc_on_io_completion(), and
* dereference the guest memory address. Make sure we don't call
* storvsc_on_io_completion() with a guest memory address that is
* zero if Hyper-V were to construct and send such a bogus packet.
* from the ring buffer.
*
* - If the operation in the vstor_packet is COMPLETE_IO, then
* we call storvsc_on_io_completion(), and dereference the
* guest memory address. Make sure we don't call
* storvsc_on_io_completion() with a guest memory address
* that is zero if Hyper-V were to construct and send such
* a bogus packet.
*
* - If the operation in the vstor_packet is FCHBA_DATA, then
* we call cache_wwn(), and access the data payload area of
* the packet (wwn_packet); however, there is no guarantee
* that the packet is big enough to contain such area.
* Future-proof the code by rejecting such a bogus packet.
*/
if (packet->operation == VSTOR_OPERATION_COMPLETE_IO) {
if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
dev_err(&device->device, "Invalid packet with ID of 0\n");
continue;
}

View File

@ -370,20 +370,6 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
/*
* To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
* address registers must be restored because the restore kernel can
* have used different addresses.
*/
ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
REG_UTP_TRANSFER_REQ_LIST_BASE_L);
ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
REG_UTP_TRANSFER_REQ_LIST_BASE_H);
ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
REG_UTP_TASK_REQ_LIST_BASE_L);
ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
REG_UTP_TASK_REQ_LIST_BASE_H);
if (ufshcd_is_link_hibern8(hba)) {
int ret = ufshcd_uic_hibern8_exit(hba);
@ -463,6 +449,18 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
.device_reset = ufs_intel_device_reset,
};
#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
/* Force a full reset and restore */
ufshcd_set_link_off(hba);
return ufshcd_system_resume(dev);
}
#endif
/**
* ufshcd_pci_shutdown - main function to put the controller in reset state
* @pdev: pointer to PCI device handle
@ -546,9 +544,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
.suspend = ufshcd_system_suspend,
.resume = ufshcd_system_resume,
.freeze = ufshcd_system_suspend,
.thaw = ufshcd_system_resume,
.poweroff = ufshcd_system_suspend,
.restore = ufshcd_pci_restore,
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
#endif

View File

@ -134,7 +134,7 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
if (!master)
return -ENOMEM;
master->bus_num = dfl_dev->id;
master->bus_num = -1;
hw = spi_master_get_devdata(master);

View File

@ -48,7 +48,7 @@ static int altera_spi_probe(struct platform_device *pdev)
return err;
/* setup the master state. */
master->bus_num = pdev->id;
master->bus_num = -1;
if (pdata) {
if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) {

View File

@ -1716,12 +1716,13 @@ static int verify_controller_parameters(struct pl022 *pl022,
return -EINVAL;
}
} else {
if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
dev_err(&pl022->adev->dev,
"Microwire half duplex mode requested,"
" but this is only available in the"
" ST version of PL022\n");
return -EINVAL;
return -EINVAL;
}
}
}
return 0;
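The pl022 hunk is a classic missing-braces fix: the indentation suggested both statements were guarded by the if, but only dev_err() was, so the return executed unconditionally in that branch, even for the valid full-duplex case. A compact illustration of the bug shape (made-up function, not the driver code):

    #include <stdio.h>

    static int check_buggy(int full_duplex)
    {
        if (!full_duplex)
            printf("half duplex not available\n");
            return -22;  /* not guarded: executes unconditionally */
        return 0;        /* never reached */
    }

    int main(void)
    {
        /* prints -22 even though full duplex is a valid setting */
        printf("%d\n", check_buggy(1));
        return 0;
    }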

View File

@ -1194,7 +1194,7 @@ static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
return 0;
}
static int tegra_slink_runtime_resume(struct device *dev)
static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_slink_data *tspi = spi_master_get_devdata(master);

View File

@ -80,6 +80,7 @@ struct vduse_dev {
struct vdpa_callback config_cb;
struct work_struct inject;
spinlock_t irq_lock;
struct rw_semaphore rwsem;
int minor;
bool broken;
bool connected;
@ -410,6 +411,8 @@ static void vduse_dev_reset(struct vduse_dev *dev)
if (domain->bounce_map)
vduse_domain_reset_bounce_map(domain);
down_write(&dev->rwsem);
dev->status = 0;
dev->driver_features = 0;
dev->generation++;
@ -443,6 +446,8 @@ static void vduse_dev_reset(struct vduse_dev *dev)
flush_work(&vq->inject);
flush_work(&vq->kick);
}
up_write(&dev->rwsem);
}
static int vduse_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 idx,
@ -885,6 +890,23 @@ static void vduse_vq_irq_inject(struct work_struct *work)
spin_unlock_irq(&vq->irq_lock);
}
static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
struct work_struct *irq_work)
{
int ret = -EINVAL;
down_read(&dev->rwsem);
if (!(dev->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto unlock;
ret = 0;
queue_work(vduse_irq_wq, irq_work);
unlock:
up_read(&dev->rwsem);
return ret;
}
static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@ -966,8 +988,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
break;
}
case VDUSE_DEV_INJECT_CONFIG_IRQ:
ret = 0;
queue_work(vduse_irq_wq, &dev->inject);
ret = vduse_dev_queue_irq_work(dev, &dev->inject);
break;
case VDUSE_VQ_SETUP: {
struct vduse_vq_config config;
@ -1053,9 +1074,8 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
if (index >= dev->vq_num)
break;
ret = 0;
index = array_index_nospec(index, dev->vq_num);
queue_work(vduse_irq_wq, &dev->vqs[index].inject);
ret = vduse_dev_queue_irq_work(dev, &dev->vqs[index].inject);
break;
}
default:
@ -1136,6 +1156,7 @@ static struct vduse_dev *vduse_dev_create(void)
INIT_LIST_HEAD(&dev->send_list);
INIT_LIST_HEAD(&dev->recv_list);
spin_lock_init(&dev->irq_lock);
init_rwsem(&dev->rwsem);
INIT_WORK(&dev->inject, vduse_dev_irq_inject);
init_waitqueue_head(&dev->waitq);
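The vduse changes guard device status with a rw_semaphore: vduse_dev_reset() flips the state under the write lock, while vduse_dev_queue_irq_work() queues injection work under the read lock only if VIRTIO_CONFIG_S_DRIVER_OK is still set, so no new IRQ work can slip in once a reset has started. A minimal userspace sketch of the scheme with a pthread rwlock (constants and names are stand-ins):

    #include <pthread.h>

    #define DRIVER_OK 0x4  /* stand-in for VIRTIO_CONFIG_S_DRIVER_OK */

    static pthread_rwlock_t rwsem = PTHREAD_RWLOCK_INITIALIZER;
    static unsigned int status = DRIVER_OK;

    void dev_reset(void)
    {
        pthread_rwlock_wrlock(&rwsem);
        status = 0;  /* readers now refuse to queue IRQ work */
        pthread_rwlock_unlock(&rwsem);
    }

    int queue_irq_work(void (*inject)(void))
    {
        int ret = -22;  /* -EINVAL, as in the ioctl path */

        pthread_rwlock_rdlock(&rwsem);
        if (status & DRIVER_OK) {
            inject();   /* stand-in for queue_work() */
            ret = 0;
        }
        pthread_rwlock_unlock(&rwsem);
        return ret;
    }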

View File

@ -576,7 +576,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
if (!indirect && vq->use_dma_api)
vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags =
vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
~VRING_DESC_F_NEXT;
if (indirect) {
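The virtio_ring change is one character but meaningful: "=" overwrote desc_extra[...].flags with ~VRING_DESC_F_NEXT (every bit except NEXT), while "&=" clears only the NEXT bit and preserves the others, such as the write flag. A micro-demo of the difference:

    #include <stdio.h>
    #include <stdint.h>

    #define F_NEXT  0x1
    #define F_WRITE 0x2

    int main(void)
    {
        uint16_t a = F_NEXT | F_WRITE, b = a;

        a =  ~F_NEXT;  /* buggy: 0xfffe, the F_WRITE bit is lost */
        b &= ~F_NEXT;  /* fixed: 0x0002, only NEXT is cleared */
        printf("buggy=0x%04x fixed=0x%04x\n", a, b);
        return 0;
    }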

View File

@ -71,8 +71,6 @@
#define TCOBASE(p) ((p)->tco_res->start)
/* SMI Control and Enable Register */
#define SMI_EN(p) ((p)->smi_res->start)
#define TCO_EN (1 << 13)
#define GBL_SMI_EN (1 << 0)
#define TCO_RLD(p) (TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */
#define TCOv1_TMR(p) (TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/
@ -357,12 +355,8 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
tmrval = seconds_to_ticks(p, t);
/*
* If TCO SMIs are off, the timer counts down twice before rebooting.
* Otherwise, the BIOS generally reboots when the SMI triggers.
*/
if (p->smi_res &&
(inl(SMI_EN(p)) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
/* For TCO v1 the timer counts down twice before rebooting */
if (p->iTCO_version == 1)
tmrval /= 2;
/* from the specs: */
@ -527,7 +521,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
* Disables TCO logic generating an SMI#
*/
val32 = inl(SMI_EN(p));
val32 &= ~TCO_EN; /* Turn off SMI clearing watchdog */
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN(p));
}

View File

@ -119,7 +119,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
if (!iwdt)
return -ENOMEM;
iwdt->base = dev->platform_data;
iwdt->base = (void __iomem *)dev->platform_data;
/*
* Retrieve rate from a fixed clock from the device tree if

View File

@ -268,8 +268,12 @@ static int omap_wdt_probe(struct platform_device *pdev)
wdev->wdog.bootstatus = WDIOF_CARDRESET;
}
if (!early_enable)
if (early_enable) {
omap_wdt_start(&wdev->wdog);
set_bit(WDOG_HW_RUNNING, &wdev->wdog.status);
} else {
omap_wdt_disable(wdev);
}
ret = watchdog_register_device(&wdev->wdog);
if (ret) {

View File

@ -130,7 +130,7 @@ static u64 sbsa_gwdt_reg_read(struct sbsa_gwdt *gwdt)
if (gwdt->version == 0)
return readl(gwdt->control_base + SBSA_GWDT_WOR);
else
return readq(gwdt->control_base + SBSA_GWDT_WOR);
return lo_hi_readq(gwdt->control_base + SBSA_GWDT_WOR);
}
static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
@ -138,7 +138,7 @@ static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
if (gwdt->version == 0)
writel((u32)val, gwdt->control_base + SBSA_GWDT_WOR);
else
writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
lo_hi_writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
}
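lo_hi_readq()/lo_hi_writeq(), from linux/io-64-nonatomic-lo-hi.h, emulate a 64-bit MMIO access as two 32-bit accesses, low word first, for configurations where readq()/writeq() either don't exist (32-bit kernels) or the bus cannot issue an atomic 64-bit transaction. A hedged sketch of what they amount to:

    #include <stdint.h>

    /* simplified model; the kernel versions use readl()/writel() */
    static uint64_t lo_hi_readq_sketch(const volatile uint32_t *addr)
    {
        uint64_t lo = addr[0];
        uint64_t hi = addr[1];

        return lo | (hi << 32);
    }

    static void lo_hi_writeq_sketch(uint64_t val, volatile uint32_t *addr)
    {
        addr[0] = (uint32_t)val;         /* low half first */
        addr[1] = (uint32_t)(val >> 32); /* then the high half */
    }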
/*
@ -411,4 +411,3 @@ MODULE_AUTHOR("Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>");
MODULE_AUTHOR("Al Stone <al.stone@linaro.org>");
MODULE_AUTHOR("Timur Tabi <timur@codeaurora.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRV_NAME);

View File

@ -358,7 +358,7 @@ int autofs_wait(struct autofs_sb_info *sbi,
qstr.len = strlen(p);
offset = p - name;
}
qstr.hash = full_name_hash(dentry, name, qstr.len);
qstr.hash = full_name_hash(dentry, qstr.name, qstr.len);
if (mutex_lock_interruptible(&sbi->wq_mutex)) {
kfree(name);

View File

@ -1121,6 +1121,9 @@ int fuse_init_fs_context_submount(struct fs_context *fsc);
*/
void fuse_conn_destroy(struct fuse_mount *fm);
/* Drop the connection and free the fuse mount */
void fuse_mount_destroy(struct fuse_mount *fm);
/**
* Add connection to control filesystem
*/

View File

@ -457,14 +457,6 @@ static void fuse_send_destroy(struct fuse_mount *fm)
}
}
static void fuse_put_super(struct super_block *sb)
{
struct fuse_mount *fm = get_fuse_mount_super(sb);
fuse_conn_put(fm->fc);
kfree(fm);
}
static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
{
stbuf->f_type = FUSE_SUPER_MAGIC;
@ -1003,7 +995,6 @@ static const struct super_operations fuse_super_operations = {
.evict_inode = fuse_evict_inode,
.write_inode = fuse_write_inode,
.drop_inode = generic_delete_inode,
.put_super = fuse_put_super,
.umount_begin = fuse_umount_begin,
.statfs = fuse_statfs,
.sync_fs = fuse_sync_fs,
@ -1424,20 +1415,17 @@ static int fuse_get_tree_submount(struct fs_context *fsc)
if (!fm)
return -ENOMEM;
fm->fc = fuse_conn_get(fc);
fsc->s_fs_info = fm;
sb = sget_fc(fsc, NULL, set_anon_super_fc);
if (IS_ERR(sb)) {
kfree(fm);
if (fsc->s_fs_info)
fuse_mount_destroy(fm);
if (IS_ERR(sb))
return PTR_ERR(sb);
}
fm->fc = fuse_conn_get(fc);
/* Initialize superblock, making @mp_fi its root */
err = fuse_fill_super_submount(sb, mp_fi);
if (err) {
fuse_conn_put(fc);
kfree(fm);
sb->s_fs_info = NULL;
deactivate_locked_super(sb);
return err;
}
@ -1569,8 +1557,6 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
{
struct fuse_fs_context *ctx = fsc->fs_private;
int err;
struct fuse_conn *fc;
struct fuse_mount *fm;
if (!ctx->file || !ctx->rootmode_present ||
!ctx->user_id_present || !ctx->group_id_present)
@ -1580,42 +1566,18 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc)
* Require mount to happen from the same user namespace which
* opened /dev/fuse to prevent potential attacks.
*/
err = -EINVAL;
if ((ctx->file->f_op != &fuse_dev_operations) ||
(ctx->file->f_cred->user_ns != sb->s_user_ns))
goto err;
return -EINVAL;
ctx->fudptr = &ctx->file->private_data;
fc = kmalloc(sizeof(*fc), GFP_KERNEL);
err = -ENOMEM;
if (!fc)
goto err;
fm = kzalloc(sizeof(*fm), GFP_KERNEL);
if (!fm) {
kfree(fc);
goto err;
}
fuse_conn_init(fc, fm, sb->s_user_ns, &fuse_dev_fiq_ops, NULL);
fc->release = fuse_free_conn;
sb->s_fs_info = fm;
err = fuse_fill_super_common(sb, ctx);
if (err)
goto err_put_conn;
return err;
/* file->private_data shall be visible on all CPUs after this */
smp_mb();
fuse_send_init(get_fuse_mount_super(sb));
return 0;
err_put_conn:
fuse_conn_put(fc);
kfree(fm);
sb->s_fs_info = NULL;
err:
return err;
}
/*
@ -1637,22 +1599,40 @@ static int fuse_get_tree(struct fs_context *fsc)
{
struct fuse_fs_context *ctx = fsc->fs_private;
struct fuse_dev *fud;
struct fuse_conn *fc;
struct fuse_mount *fm;
struct super_block *sb;
int err;
fc = kmalloc(sizeof(*fc), GFP_KERNEL);
if (!fc)
return -ENOMEM;
fm = kzalloc(sizeof(*fm), GFP_KERNEL);
if (!fm) {
kfree(fc);
return -ENOMEM;
}
fuse_conn_init(fc, fm, fsc->user_ns, &fuse_dev_fiq_ops, NULL);
fc->release = fuse_free_conn;
fsc->s_fs_info = fm;
if (ctx->fd_present)
ctx->file = fget(ctx->fd);
if (IS_ENABLED(CONFIG_BLOCK) && ctx->is_bdev) {
err = get_tree_bdev(fsc, fuse_fill_super);
goto out_fput;
goto out;
}
/*
* While block dev mount can be initialized with a dummy device fd
* (found by device name), normal fuse mounts can't
*/
err = -EINVAL;
if (!ctx->file)
return -EINVAL;
goto out;
/*
* Allow creating a fuse mount with an already initialized fuse
@ -1668,7 +1648,9 @@ static int fuse_get_tree(struct fs_context *fsc)
} else {
err = get_tree_nodev(fsc, fuse_fill_super);
}
out_fput:
out:
if (fsc->s_fs_info)
fuse_mount_destroy(fm);
if (ctx->file)
fput(ctx->file);
return err;
@ -1747,17 +1729,25 @@ static void fuse_sb_destroy(struct super_block *sb)
struct fuse_mount *fm = get_fuse_mount_super(sb);
bool last;
if (fm) {
if (sb->s_root) {
last = fuse_mount_remove(fm);
if (last)
fuse_conn_destroy(fm);
}
}
void fuse_mount_destroy(struct fuse_mount *fm)
{
fuse_conn_put(fm->fc);
kfree(fm);
}
EXPORT_SYMBOL(fuse_mount_destroy);
static void fuse_kill_sb_anon(struct super_block *sb)
{
fuse_sb_destroy(sb);
kill_anon_super(sb);
fuse_mount_destroy(get_fuse_mount_super(sb));
}
static struct file_system_type fuse_fs_type = {
@ -1775,6 +1765,7 @@ static void fuse_kill_sb_blk(struct super_block *sb)
{
fuse_sb_destroy(sb);
kill_block_super(sb);
fuse_mount_destroy(get_fuse_mount_super(sb));
}
static struct file_system_type fuseblk_fs_type = {
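The fuse hunks in this file and the virtiofs hunks in the next converge every teardown path on the new fuse_mount_destroy(): fuse_get_tree() now allocates the connection and mount up front, parks the mount in fsc->s_fs_info, and destroys it on exit only if the superblock did not take ownership (sget_fc() clears s_fs_info when it does). A self-contained sketch of that ownership convention, with illustrative types rather than the fuse structures:

    #include <stdlib.h>

    struct mount { int placeholder; };
    struct fs_context { void *s_fs_info; };

    static int get_tree_sketch(struct fs_context *fsc,
                               int (*fill_super)(struct fs_context *))
    {
        struct mount *fm = calloc(1, sizeof(*fm));

        if (!fm)
            return -12; /* -ENOMEM */
        fsc->s_fs_info = fm;

        int err = fill_super(fsc); /* on success, consumes s_fs_info */

        if (fsc->s_fs_info)       /* still ours: destroy exactly once */
            free(fm);             /* stand-in for fuse_mount_destroy() */
        return err;
    }

Every exit path, success or failure, performs exactly one destroy, which is what removes the unbalanced fuse_conn_put()/kfree() pairs scattered through the old error handling.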

View File

@ -1394,12 +1394,13 @@ static void virtio_kill_sb(struct super_block *sb)
bool last;
/* If mount failed, we can still be called without any fc */
if (fm) {
if (sb->s_root) {
last = fuse_mount_remove(fm);
if (last)
virtio_fs_conn_destroy(fm);
}
kill_anon_super(sb);
fuse_mount_destroy(fm);
}
static int virtio_fs_test_super(struct super_block *sb,
@ -1455,19 +1456,14 @@ static int virtio_fs_get_tree(struct fs_context *fsc)
fsc->s_fs_info = fm;
sb = sget_fc(fsc, virtio_fs_test_super, set_anon_super_fc);
if (fsc->s_fs_info) {
fuse_conn_put(fc);
kfree(fm);
}
if (fsc->s_fs_info)
fuse_mount_destroy(fm);
if (IS_ERR(sb))
return PTR_ERR(sb);
if (!sb->s_root) {
err = virtio_fs_fill_super(sb, fsc);
if (err) {
fuse_conn_put(fc);
kfree(fm);
sb->s_fs_info = NULL;
deactivate_locked_super(sb);
return err;
}

Some files were not shown because too many files have changed in this diff.