commit 19d28fbd30

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

BPF alignment tests got a conflict because the registers are output
as Rn_w instead of just Rn in net-next, and in net a fixup for a
testcase prohibits logical operations on pointers before using them.

Also, we should attempt to patch BPF call args if JIT always on is
enabled.  Instead, if we fail to JIT the subprogs we should pass
an error back up and fail immediately.

Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -9,6 +9,7 @@ Contents:
    batman-adv
    kapi
    z8530book
+   msg_zerocopy
 
 .. only:: subproject
 
@@ -16,4 +17,3 @@ Contents:
    =======
 
    * :ref:`genindex`
-
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -72,6 +72,10 @@ this flag, a process must first signal intent by setting a socket option:
 	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
 		error(1, errno, "setsockopt zerocopy");
 
+Setting the socket option only works when the socket is in its initial
+(TCP_CLOSED) state.  Trying to set the option for a socket returned by accept(),
+for example, will lead to an EBUSY error. In this case, the option should be set
+to the listening socket and it will be inherited by the accepted sockets.
 
 Transmission
 ------------
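For illustration, the opt-in flow this hunk documents can be exercised from
userspace roughly as below. This is a sketch distilled from the documentation
text, not part of the patch: the loopback peer and port are assumptions, the
SO_ZEROCOPY/MSG_ZEROCOPY fallback values are guarded for older headers, and
the error-queue completion handling a real sender needs is omitted.

/* Illustrative MSG_ZEROCOPY sender (not part of this patch). */
#include <errno.h>
#include <error.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY 60		/* value from asm-generic/socket.h */
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY 0x4000000
#endif

int main(void)
{
	static char buf[64 * 1024];
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8000),	/* assumed test listener */
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	int one = 1;
	int fd;

	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd == -1)
		error(1, errno, "socket");

	/* Must happen while the socket is still in TCP_CLOSED state;
	 * on a listener the option is inherited by accepted sockets.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
		error(1, errno, "setsockopt zerocopy");

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)))
		error(1, errno, "connect");

	if (send(fd, buf, sizeof(buf), MSG_ZEROCOPY) == -1)
		error(1, errno, "send");

	/* The pages behind buf must stay untouched until a completion
	 * notification arrives on the socket's error queue (omitted).
	 */
	close(fd);
	return 0;
}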
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -235,6 +235,7 @@ LEAF(mips_cps_core_init)
	has_mt	t0, 3f
 
	.set	push
+	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
 
	/* Only allow 1 TC per VPE to execute... */
@@ -388,6 +389,7 @@ LEAF(mips_cps_boot_vpes)
 #elif defined(CONFIG_MIPS_MT)
 
	.set	push
+	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
 
	/* If the core doesn't support MT then return */
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -705,6 +705,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 	struct task_struct *t;
 	int max_users;
 
+	/* If nothing to change, return right away, successfully. */
+	if (value == mips_get_process_fp_mode(task))
+		return 0;
+
+	/* Only accept a mode change if 64-bit FP enabled for o32. */
+	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+		return -EOPNOTSUPP;
+
+	/* And only for o32 tasks. */
+	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
+		return -EOPNOTSUPP;
+
 	/* Check the value is valid */
 	if (value & ~known_bits)
 		return -EOPNOTSUPP;
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -419,25 +419,38 @@ static int gpr64_set(struct task_struct *target,
 
 #endif /* CONFIG_64BIT */
 
-static int fpr_get(struct task_struct *target,
-		   const struct user_regset *regset,
-		   unsigned int pos, unsigned int count,
-		   void *kbuf, void __user *ubuf)
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
+ * correspond 1:1 to buffer slots.  Only general registers are copied.
+ */
+static int fpr_get_fpa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       void **kbuf, void __user **ubuf)
+{
+	return user_regset_copyout(pos, count, kbuf, ubuf,
+				   &target->thread.fpu,
+				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
+ * general register slots are copied to buffer slots.  Only general
+ * registers are copied.
+ */
+static int fpr_get_msa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       void **kbuf, void __user **ubuf)
 {
-	unsigned i;
+	unsigned int i;
+	u64 fpr_val;
 	int err;
-	u64 fpr_val;
-
-	/* XXX fcr31  */
-	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
-		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &target->thread.fpu,
-					   0, sizeof(elf_fpregset_t));
 
 	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 	for (i = 0; i < NUM_FPU_REGS; i++) {
 		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
-		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+		err = user_regset_copyout(pos, count, kbuf, ubuf,
 					  &fpr_val, i * sizeof(elf_fpreg_t),
 					  (i + 1) * sizeof(elf_fpreg_t));
 		if (err)
@@ -447,27 +460,64 @@ static int fpr_get(struct task_struct *target,
 	return 0;
 }
 
-static int fpr_set(struct task_struct *target,
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ */
+static int fpr_get(struct task_struct *target,
 		   const struct user_regset *regset,
 		   unsigned int pos, unsigned int count,
-		   const void *kbuf, const void __user *ubuf)
+		   void *kbuf, void __user *ubuf)
 {
-	unsigned i;
+	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 	int err;
-	u64 fpr_val;
-
-	/* XXX fcr31  */
 
-	init_fp_ctx(target);
+	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
+	else
+		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
+	if (err)
+		return err;
+
+	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				  &target->thread.fpu.fcr31,
+				  fcr31_pos, fcr31_pos + sizeof(u32));
 
-	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
-		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.fpu,
-					  0, sizeof(elf_fpregset_t));
+	return err;
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * !CONFIG_CPU_HAS_MSA variant.  Buffer slots correspond 1:1 to FP
+ * context's general register slots.  Only general registers are copied.
+ */
+static int fpr_set_fpa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       const void **kbuf, const void __user **ubuf)
+{
+	return user_regset_copyin(pos, count, kbuf, ubuf,
+				  &target->thread.fpu,
+				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
+ * bits only of FP context's general register slots.  Only general
+ * registers are copied.
+ */
+static int fpr_set_msa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       const void **kbuf, const void __user **ubuf)
+{
+	unsigned int i;
+	u64 fpr_val;
+	int err;
 
 	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
-	for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
-		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+		err = user_regset_copyin(pos, count, kbuf, ubuf,
 					 &fpr_val, i * sizeof(elf_fpreg_t),
 					 (i + 1) * sizeof(elf_fpreg_t));
 		if (err)
@@ -478,6 +528,53 @@ static int fpr_set(struct task_struct *target,
 	return 0;
 }
 
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ *
+ * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
+ * which is supposed to have been guaranteed by the kernel before
+ * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
+ * so that we can safely avoid preinitializing temporaries for
+ * partial register writes.
+ */
+static int fpr_set(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user *ubuf)
+{
+	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+	u32 fcr31;
+	int err;
+
+	BUG_ON(count % sizeof(elf_fpreg_t));
+
+	if (pos + count > sizeof(elf_fpregset_t))
+		return -EIO;
+
+	init_fp_ctx(target);
+
+	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
+	else
+		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
+	if (err)
+		return err;
+
+	if (count > 0) {
+		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 &fcr31,
+					 fcr31_pos, fcr31_pos + sizeof(u32));
+		if (err)
+			return err;
+
+		ptrace_setfcr31(target, fcr31);
+	}
+
+	return err;
+}
+
 enum mips_regset {
 	REGSET_GPR,
 	REGSET_FPR,
--- /dev/null
+++ b/arch/riscv/configs/defconfig
@@ -0,0 +1,75 @@
+CONFIG_SMP=y
+CONFIG_PCI=y
+CONFIG_PCIE_XILINX=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_RAS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+# CONFIG_RCU_TRACE is not set
+CONFIG_CRYPTO_USER_API_HASH=y
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -17,10 +17,10 @@
 #include <linux/const.h>
 
 /* Status register flags */
-#define SR_IE   _AC(0x00000002, UL) /* Interrupt Enable */
-#define SR_PIE  _AC(0x00000020, UL) /* Previous IE */
-#define SR_PS   _AC(0x00000100, UL) /* Previously Supervisor */
-#define SR_SUM  _AC(0x00040000, UL) /* Supervisor may access User Memory */
+#define SR_SIE  _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_SPP  _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_SUM  _AC(0x00040000, UL) /* Supervisor may access User Memory */
 
 #define SR_FS           _AC(0x00006000, UL) /* Floating-point Status */
 #define SR_FS_OFF       _AC(0x00000000, UL)
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -21,8 +21,6 @@
 
 #include <linux/types.h>
 
-#ifdef CONFIG_MMU
-
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
 /*
@@ -36,8 +34,6 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
 extern void iounmap(volatile void __iomem *addr);
 
-#endif /* CONFIG_MMU */
-
 /* Generic IO read/write.  These perform native-endian accesses. */
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
--- a/arch/riscv/include/asm/irqflags.h
+++ b/arch/riscv/include/asm/irqflags.h
@@ -27,25 +27,25 @@ static inline unsigned long arch_local_save_flags(void)
 /* unconditionally enable interrupts */
 static inline void arch_local_irq_enable(void)
 {
-	csr_set(sstatus, SR_IE);
+	csr_set(sstatus, SR_SIE);
 }
 
 /* unconditionally disable interrupts */
 static inline void arch_local_irq_disable(void)
 {
-	csr_clear(sstatus, SR_IE);
+	csr_clear(sstatus, SR_SIE);
 }
 
 /* get status and disable interrupts */
 static inline unsigned long arch_local_irq_save(void)
 {
-	return csr_read_clear(sstatus, SR_IE);
+	return csr_read_clear(sstatus, SR_SIE);
 }
 
 /* test flags */
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & SR_IE);
+	return !(flags & SR_SIE);
 }
 
 /* test hardware interrupt enable bit */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	csr_set(sstatus, flags & SR_IE);
+	csr_set(sstatus, flags & SR_SIE);
 }
 
 #endif /* _ASM_RISCV_IRQFLAGS_H */
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -20,8 +20,6 @@
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MMU
-
 /* Page Upper Directory not used in RISC-V */
 #include <asm-generic/pgtable-nopud.h>
 #include <asm/page.h>
@@ -413,8 +411,6 @@ static inline void pgtable_cache_init(void)
 	/* No page table caches to initialize */
 }
 
-#endif /* CONFIG_MMU */
-
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -66,7 +66,7 @@ struct pt_regs {
 #define REG_FMT "%08lx"
 #endif
 
-#define user_mode(regs) (((regs)->sstatus & SR_PS) == 0)
+#define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0)
 
 
 /* Helpers for working with the instruction pointer */
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -15,8 +15,6 @@
 #ifndef _ASM_RISCV_TLBFLUSH_H
 #define _ASM_RISCV_TLBFLUSH_H
 
-#ifdef CONFIG_MMU
-
 #include <linux/mm_types.h>
 
 /*
@@ -64,6 +62,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_all();
 }
 
-#endif /* CONFIG_MMU */
-
 #endif /* _ASM_RISCV_TLBFLUSH_H */
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -127,7 +127,6 @@ extern int fixup_exception(struct pt_regs *state);
  * call.
  */
 
-#ifdef CONFIG_MMU
 #define __get_user_asm(insn, x, ptr, err)			\
 do {								\
 	uintptr_t __tmp;					\
@@ -153,13 +152,11 @@ do {								\
 	__disable_user_access();				\
 	(x) = __x;						\
 } while (0)
-#endif /* CONFIG_MMU */
 
 #ifdef CONFIG_64BIT
 #define __get_user_8(x, ptr, err) \
 	__get_user_asm("ld", x, ptr, err)
 #else /* !CONFIG_64BIT */
-#ifdef CONFIG_MMU
 #define __get_user_8(x, ptr, err)				\
 do {								\
 	u32 __user *__ptr = (u32 __user *)(ptr);		\
@@ -193,7 +190,6 @@ do {								\
 	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
 		(((u64)__hi << 32) | __lo)));			\
 } while (0)
-#endif /* CONFIG_MMU */
 #endif /* CONFIG_64BIT */
 
 
@@ -267,8 +263,6 @@ do {								\
 		((x) = 0, -EFAULT);				\
 })
 
-
-#ifdef CONFIG_MMU
 #define __put_user_asm(insn, x, ptr, err)			\
 do {								\
 	uintptr_t __tmp;					\
@@ -292,14 +286,11 @@ do {								\
 		: "rJ" (__x), "i" (-EFAULT));			\
 	__disable_user_access();				\
 } while (0)
-#endif /* CONFIG_MMU */
-
 
 #ifdef CONFIG_64BIT
 #define __put_user_8(x, ptr, err) \
 	__put_user_asm("sd", x, ptr, err)
 #else /* !CONFIG_64BIT */
-#ifdef CONFIG_MMU
 #define __put_user_8(x, ptr, err)				\
 do {								\
 	u32 __user *__ptr = (u32 __user *)(ptr);		\
@@ -329,7 +320,6 @@ do {								\
 		: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));	\
 	__disable_user_access();				\
 } while (0)
-#endif /* CONFIG_MMU */
 #endif /* CONFIG_64BIT */
 
 
@@ -438,7 +428,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
  * will set "err" to -EFAULT, while successful accesses return the previous
  * value.
  */
-#ifdef CONFIG_MMU
 #define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)	\
 ({								\
 	__typeof__(ptr) __ptr = (ptr);				\
@@ -508,6 +497,5 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
 	(err) = __err;						\
 	__ret;							\
 })
-#endif /* CONFIG_MMU */
 
 #endif /* _ASM_RISCV_UACCESS_H */
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -14,3 +14,4 @@
 #define __ARCH_HAVE_MMU
 #define __ARCH_WANT_SYS_CLONE
 #include <uapi/asm/unistd.h>
+#include <uapi/asm/syscalls.h>
@ -1,28 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2017 SiFive
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_RISCV_VDSO_SYSCALLS_H
|
||||
#define _ASM_RISCV_VDSO_SYSCALLS_H
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
/* These syscalls are only used by the vDSO and are not in the uapi. */
|
||||
#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
|
||||
__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_RISCV_VDSO_H */
|
|
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/syscalls.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM__UAPI__SYSCALLS_H
+#define _ASM__UAPI__SYSCALLS_H
+
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
+
+#endif
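For illustration, a user program would normally reach this system call
through the vDSO wrapper touched later in this commit, but it can also be
invoked directly with syscall(2). A minimal sketch; the numeric fallback
assumes the asm-generic syscall table (__NR_arch_specific_syscall == 244),
and the flags value 0 (flush on behalf of all threads) is an assumption for
the example.

/* Sketch: invoking the RISC-V icache-flush syscall directly
 * (illustration only; prefer the __vdso_flush_icache entry point).
 */
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_riscv_flush_icache
/* __NR_arch_specific_syscall (244) + 15 on asm-generic ABIs */
#define __NR_riscv_flush_icache 259
#endif

static long flush_icache_range(void *start, void *end)
{
	/* flags == 0: flush for all threads of the process */
	return syscall(__NR_riscv_flush_icache, start, end, 0UL);
}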
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -196,7 +196,7 @@ handle_syscall:
	addi s2, s2, 0x4
	REG_S s2, PT_SEPC(sp)
	/* System calls run with interrupts enabled */
-	csrs sstatus, SR_IE
+	csrs sstatus, SR_SIE
	/* Trace syscalls, but only if requested by the user. */
	REG_L t0, TASK_TI_FLAGS(tp)
	andi t0, t0, _TIF_SYSCALL_TRACE
@@ -224,8 +224,8 @@ ret_from_syscall:
 
 ret_from_exception:
	REG_L s0, PT_SSTATUS(sp)
-	csrc sstatus, SR_IE
-	andi s0, s0, SR_PS
+	csrc sstatus, SR_SIE
+	andi s0, s0, SR_SPP
	bnez s0, restore_all
 
 resume_userspace:
@@ -255,7 +255,7 @@ work_pending:
	bnez s1, work_resched
 work_notifysig:
	/* Handle pending signals and notify-resume requests */
-	csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */
+	csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
	move a0, sp /* pt_regs */
	move a1, s0 /* current_thread_info->flags */
	tail do_notify_resume
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -76,7 +76,7 @@ void show_regs(struct pt_regs *regs)
 void start_thread(struct pt_regs *regs, unsigned long pc,
	unsigned long sp)
 {
-	regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL;
+	regs->sstatus = SR_SPIE /* User mode, irqs on */ | SR_FS_INITIAL;
	regs->sepc = pc;
	regs->sp = sp;
	set_fs(USER_DS);
@@ -110,7 +110,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
		const register unsigned long gp __asm__ ("gp");
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gp = gp;
-		childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */
+		childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */
 
		p->thread.ra = (unsigned long)ret_from_kernel_thread;
		p->thread.s[0] = usp; /* fn */
--- a/arch/riscv/kernel/syscall_table.c
+++ b/arch/riscv/kernel/syscall_table.c
@@ -23,5 +23,4 @@
 void *sys_call_table[__NR_syscalls] = {
	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
 #include <asm/unistd.h>
-#include <asm/vdso-syscalls.h>
 };
--- a/arch/riscv/kernel/vdso/flush_icache.S
+++ b/arch/riscv/kernel/vdso/flush_icache.S
@@ -13,7 +13,6 @@
 
 #include <linux/linkage.h>
 #include <asm/unistd.h>
-#include <asm/vdso-syscalls.h>
 
 .text
 /* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -63,7 +63,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 		goto vmalloc_fault;
 
 	/* Enable interrupts if they were enabled in the parent context. */
-	if (likely(regs->sstatus & SR_PIE))
+	if (likely(regs->sstatus & SR_SPIE))
 		local_irq_enable();
 
 	/*
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -9,6 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/sh_eth.h>
 #include <mach-se/mach/se.h>
 #include <mach-se/mach/mrshpc.h>
 #include <asm/machvec.h>
@@ -115,13 +116,23 @@ static struct platform_device heartbeat_device = {
 #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
 	defined(CONFIG_CPU_SUBTYPE_SH7712)
 /* SH771X Ethernet driver */
+static struct sh_eth_plat_data sh_eth_plat = {
+	.phy = PHY_ID,
+	.phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
 static struct resource sh_eth0_resources[] = {
 	[0] = {
 		.start = SH_ETH0_BASE,
-		.end = SH_ETH0_BASE + 0x1B8,
+		.end = SH_ETH0_BASE + 0x1B8 - 1,
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = {
+		.start = SH_TSU_BASE,
+		.end = SH_TSU_BASE + 0x200 - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[2] = {
 		.start = SH_ETH0_IRQ,
 		.end = SH_ETH0_IRQ,
 		.flags = IORESOURCE_IRQ,
@@ -132,7 +143,7 @@ static struct platform_device sh_eth0_device = {
 	.name = "sh771x-ether",
 	.id = 0,
 	.dev = {
-		.platform_data = PHY_ID,
+		.platform_data = &sh_eth_plat,
 	},
 	.num_resources = ARRAY_SIZE(sh_eth0_resources),
 	.resource = sh_eth0_resources,
@@ -141,10 +152,15 @@ static struct platform_device sh_eth0_device = {
 static struct resource sh_eth1_resources[] = {
 	[0] = {
 		.start = SH_ETH1_BASE,
-		.end = SH_ETH1_BASE + 0x1B8,
+		.end = SH_ETH1_BASE + 0x1B8 - 1,
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = {
+		.start = SH_TSU_BASE,
+		.end = SH_TSU_BASE + 0x200 - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[2] = {
 		.start = SH_ETH1_IRQ,
 		.end = SH_ETH1_IRQ,
 		.flags = IORESOURCE_IRQ,
@@ -155,7 +171,7 @@ static struct platform_device sh_eth1_device = {
 	.name = "sh771x-ether",
 	.id = 1,
 	.dev = {
-		.platform_data = PHY_ID,
+		.platform_data = &sh_eth_plat,
 	},
 	.num_resources = ARRAY_SIZE(sh_eth1_resources),
 	.resource = sh_eth1_resources,
--- a/arch/sh/include/mach-se/mach/se.h
+++ b/arch/sh/include/mach-se/mach/se.h
@@ -100,6 +100,7 @@
 /* Base address */
 #define SH_ETH0_BASE 0xA7000000
 #define SH_ETH1_BASE 0xA7000400
+#define SH_TSU_BASE  0xA7000800
 /* PHY ID */
 #if defined(CONFIG_CPU_SUBTYPE_SH7710)
 # define PHY_ID 0x00
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -562,6 +562,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
 	}
 }
 
+void blk_drain_queue(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	__blk_drain_queue(q, true);
+	spin_unlock_irq(q->queue_lock);
+}
+
 /**
  * blk_queue_bypass_start - enter queue bypass mode
  * @q: queue of interest
@@ -689,8 +696,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	 */
 	blk_freeze_queue(q);
 	spin_lock_irq(lock);
-	if (!q->mq_ops)
-		__blk_drain_queue(q, true);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);
 
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -161,6 +161,8 @@ void blk_freeze_queue(struct request_queue *q)
 	 * exported to drivers as the only user for unfreeze is blk_mq.
 	 */
 	blk_freeze_queue_start(q);
+	if (!q->mq_ops)
+		blk_drain_queue(q);
 	blk_mq_freeze_queue_wait(q);
 }
 
--- a/block/blk.h
+++ b/block/blk.h
@@ -330,4 +330,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 }
 #endif /* CONFIG_BOUNCE */
 
+extern void blk_drain_queue(struct request_queue *q);
+
 #endif /* BLK_INTERNAL_H */
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1581,9 +1581,8 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
 	return err;
 }
 
-static void lo_release(struct gendisk *disk, fmode_t mode)
+static void __lo_release(struct loop_device *lo)
 {
-	struct loop_device *lo = disk->private_data;
 	int err;
 
 	if (atomic_dec_return(&lo->lo_refcnt))
@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
 	mutex_unlock(&lo->lo_ctl_mutex);
 }
 
+static void lo_release(struct gendisk *disk, fmode_t mode)
+{
+	mutex_lock(&loop_index_mutex);
+	__lo_release(disk->private_data);
+	mutex_unlock(&loop_index_mutex);
+}
+
 static const struct block_device_operations lo_fops = {
 	.owner =	THIS_MODULE,
 	.open =		lo_open,
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
 	mutex_unlock(&rbd_dev->watch_mutex);
 }
 
+static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
+{
+	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+
+	strcpy(rbd_dev->lock_cookie, cookie);
+	rbd_set_owner_cid(rbd_dev, &cid);
+	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+}
+
 /*
  * lock_rwsem must be held for write
  */
 static int rbd_lock(struct rbd_device *rbd_dev)
 {
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
-	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
 	char cookie[32];
 	int ret;
 
@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
 		return ret;
 
 	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
-	strcpy(rbd_dev->lock_cookie, cookie);
-	rbd_set_owner_cid(rbd_dev, &cid);
-	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+	__rbd_lock(rbd_dev, cookie);
 	return 0;
 }
 
@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
 		queue_delayed_work(rbd_dev->task_wq,
 				   &rbd_dev->lock_dwork, 0);
 	} else {
-		strcpy(rbd_dev->lock_cookie, cookie);
+		__rbd_lock(rbd_dev, cookie);
 	}
 }
 
@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	segment_size = rbd_obj_bytes(&rbd_dev->header);
 	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
 	q->limits.max_sectors = queue_max_hw_sectors(q);
-	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+	blk_queue_max_segments(q, USHRT_MAX);
 	blk_queue_max_segment_size(q, segment_size);
 	blk_queue_io_min(q, segment_size);
 	blk_queue_io_opt(q, segment_size);
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2892,6 +2892,27 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
 }
 EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
 
+/**
+ * gpiod_set_value_nocheck() - set a GPIO line value without checking
+ * @desc: the descriptor to set the value on
+ * @value: value to set
+ *
+ * This sets the value of a GPIO line backing a descriptor, applying
+ * different semantic quirks like active low and open drain/source
+ * handling.
+ */
+static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
+{
+	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+		value = !value;
+	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+		gpio_set_open_drain_value_commit(desc, value);
+	else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+		gpio_set_open_source_value_commit(desc, value);
+	else
+		gpiod_set_raw_value_commit(desc, value);
+}
+
 /**
  * gpiod_set_value() - assign a gpio's value
  * @desc: gpio whose value will be assigned
@@ -2906,16 +2927,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
 void gpiod_set_value(struct gpio_desc *desc, int value)
 {
 	VALIDATE_DESC_VOID(desc);
 	/* Should be using gpiod_set_value_cansleep() */
 	WARN_ON(desc->gdev->chip->can_sleep);
-	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
-		value = !value;
-	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
-		gpio_set_open_drain_value_commit(desc, value);
-	else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
-		gpio_set_open_source_value_commit(desc, value);
-	else
-		gpiod_set_raw_value_commit(desc, value);
+	gpiod_set_value_nocheck(desc, value);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_value);
 
@@ -3243,9 +3256,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
 {
 	might_sleep_if(extra_checks);
 	VALIDATE_DESC_VOID(desc);
-	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
-		value = !value;
-	gpiod_set_raw_value_commit(desc, value);
+	gpiod_set_value_nocheck(desc, value);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
 
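As background for the refactor above: consumers only see these quirks
through the high-level setters. A hypothetical consumer sketch (the device,
the "led" function name, and the probe wiring are illustrative assumptions,
not taken from this commit):

/* Hypothetical consumer: gpiod_set_value() applies the active-low and
 * open-drain/source quirks now centralized in gpiod_set_value_nocheck().
 */
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct gpio_desc *led;

	led = devm_gpiod_get(&pdev->dev, "led", GPIOD_OUT_LOW);
	if (IS_ERR(led))
		return PTR_ERR(led);

	/* Logical "on": inverted automatically if the line is active-low. */
	gpiod_set_value(led, 1);

	return 0;
}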
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
 	now = tmr_cnt_read(etsects);
 	now += delta;
 	tmr_cnt_write(etsects, now);
+	set_fipers(etsects);
 
 	spin_unlock_irqrestore(&etsects->lock, flags);
 
-	set_fipers(etsects);
-
 	return 0;
 }
 
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
 		return 0;
 	}
 
-	wmb(); /* reset needs to be written before we read control register */
+	/* Reset needs to be written before we read control register, and
+	 * we must wait for the HW to become responsive once again
+	 */
+	wmb();
+	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
+
 	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
 	do {
 		u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -59,6 +59,7 @@
 #define MLXSW_PCI_SW_RESET			0xF0010
 #define MLXSW_PCI_SW_RESET_RST_BIT		BIT(0)
 #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS	5000
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS		100
 #define MLXSW_PCI_FW_READY			0xA1844
 #define MLXSW_PCI_FW_READY_MASK			0xFFFF
 #define MLXSW_PCI_FW_READY_MAGIC		0x5E
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -178,7 +178,8 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
 				  int tclass_num, u32 min, u32 max,
 				  u32 probability, bool is_ecn)
 {
-	char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)];
+	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
+	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	int err;
 
@@ -192,10 +193,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
 	if (err)
 		return err;
 
-	mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num,
+	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
 			     MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
 
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
 }
 
 static int
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
 		return err;
 	}
 	nn_writeb(nn, ctrl_offset, entry->entry);
+	nfp_net_irq_unmask(nn, entry->entry);
 
 	return 0;
 }
@@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
 			     unsigned int vector_idx)
 {
 	nn_writeb(nn, ctrl_offset, 0xff);
+	nn_pci_flush(nn);
 	free_irq(nn->irq_entries[vector_idx].vector, nn);
 }
 
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -384,6 +384,18 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
 		}
 	}
 
+	if (changed & IEEE80211_CONF_CHANGE_PS) {
+		list_for_each_entry(tmp, &wcn->vif_list, list) {
+			vif = wcn36xx_priv_to_vif(tmp);
+			if (hw->conf.flags & IEEE80211_CONF_PS) {
+				if (vif->bss_conf.ps) /* ps allowed ? */
+					wcn36xx_pmc_enter_bmps_state(wcn, vif);
+			} else {
+				wcn36xx_pmc_exit_bmps_state(wcn, vif);
+			}
+		}
+	}
+
 	mutex_unlock(&wcn->conf_mutex);
 
 	return 0;
@@ -757,17 +769,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
 		vif_priv->dtim_period = bss_conf->dtim_period;
 	}
 
-	if (changed & BSS_CHANGED_PS) {
-		wcn36xx_dbg(WCN36XX_DBG_MAC,
-			    "mac bss PS set %d\n",
-			    bss_conf->ps);
-		if (bss_conf->ps) {
-			wcn36xx_pmc_enter_bmps_state(wcn, vif);
-		} else {
-			wcn36xx_pmc_exit_bmps_state(wcn, vif);
-		}
-	}
-
 	if (changed & BSS_CHANGED_BSSID) {
 		wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
 			    bss_conf->bssid);
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -45,8 +45,10 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
 	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
 
 	if (WCN36XX_BMPS != vif_priv->pw_state) {
-		wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
-		return -EINVAL;
+		/* Unbalanced call or last BMPS enter failed */
+		wcn36xx_dbg(WCN36XX_DBG_PMC,
+			    "Not in BMPS mode, no need to exit\n");
+		return -EALREADY;
 	}
 	wcn36xx_smd_exit_bmps(wcn, vif);
 	vif_priv->pw_state = WCN36XX_FULL_POWER;
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -670,11 +670,15 @@ static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
 	return index & (q->n_window - 1);
 }
 
-static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie,
+static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
 				     struct iwl_txq *txq, int idx)
 {
-	return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq,
-									 idx);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (trans->cfg->use_tfh)
+		idx = iwl_pcie_get_cmd_index(txq, idx);
+
+	return txq->tfds + trans_pcie->tfd_size * idx;
 }
 
 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -171,8 +171,6 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
 
 static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
 	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
 	 * idx is bounded by n_window
 	 */
@@ -181,7 +179,7 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 	lockdep_assert_held(&txq->lock);
 
 	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
-				iwl_pcie_get_tfd(trans_pcie, txq, idx));
+				iwl_pcie_get_tfd(trans, txq, idx));
 
 	/* free SKB */
 	if (txq->entries) {
@@ -364,11 +362,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 					    struct sk_buff *skb,
 					    struct iwl_cmd_meta *out_meta)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-	struct iwl_tfh_tfd *tfd =
-		iwl_pcie_get_tfd(trans_pcie, txq, idx);
+	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
 	dma_addr_t tb_phys;
 	bool amsdu;
 	int i, len, tb1_len, tb2_len, hdr_len;
@@ -565,8 +561,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
 	u8 group_id = iwl_cmd_groupid(cmd->id);
 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
-	struct iwl_tfh_tfd *tfd =
-		iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
 
 	memset(tfd, 0, sizeof(*tfd));
 
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -373,7 +373,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int i, num_tbs;
-	void *tfd = iwl_pcie_get_tfd(trans_pcie, txq, index);
+	void *tfd = iwl_pcie_get_tfd(trans, txq, index);
 
 	/* Sanity check on number of chunks */
 	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
@@ -2018,7 +2018,7 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 	}
 
 	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+			     iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
 			     trans_pcie->tfd_size,
 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
 			     hdr_len);
@@ -2092,7 +2092,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 				     IEEE80211_CCMP_HDR_LEN : 0;
 
 	trace_iwlwifi_dev_tx(trans->dev, skb,
-			     iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr),
+			     iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
 			     trans_pcie->tfd_size,
 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
 
@@ -2425,7 +2425,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
 	       IWL_FIRST_TB_SIZE);
 
-	tfd = iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr);
+	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
 	/* Set up entry for this TFD in Tx byte-count array */
 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
 					 iwl_pcie_tfd_get_num_tbs(trans, tfd));
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1335,6 +1335,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
 		struct nvme_ns *ns, struct nvme_id_ns *id)
 {
 	sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
+	unsigned short bs = 1 << ns->lba_shift;
 	unsigned stream_alignment = 0;
 
 	if (ns->ctrl->nr_streams && ns->sws && ns->sgs)
@@ -1343,7 +1344,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
 	blk_mq_freeze_queue(disk->queue);
 	blk_integrity_unregister(disk);
 
-	blk_queue_logical_block_size(disk->queue, 1 << ns->lba_shift);
+	blk_queue_logical_block_size(disk->queue, bs);
+	blk_queue_physical_block_size(disk->queue, bs);
+	blk_queue_io_min(disk->queue, bs);
+
 	if (ns->ms && !ns->ext &&
 	    (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
 		nvme_init_integrity(disk, ns->ms, ns->pi_type);
@@ -2987,6 +2991,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	mutex_unlock(&ns->ctrl->namespaces_mutex);
 
 	synchronize_srcu(&ns->head->srcu);
+	nvme_mpath_check_last_path(ns);
 	nvme_put_ns(ns);
 }
 
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -417,6 +417,15 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 		rcu_assign_pointer(head->current_path, NULL);
 }
 struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
+
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+	struct nvme_ns_head *head = ns->head;
+
+	if (head->disk && list_empty(&head->list))
+		kblockd_schedule_work(&head->requeue_work);
+}
+
 #else
 static inline void nvme_failover_req(struct request *req)
 {
@@ -448,6 +457,9 @@ static inline void nvme_mpath_remove_disk_links(struct nvme_ns *ns)
 static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
 {
 }
+static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+{
+}
 #endif /* CONFIG_NVME_MULTIPATH */
 
 #ifdef CONFIG_NVM
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -448,12 +448,31 @@ static void **nvme_pci_iod_list(struct request *req)
 	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	unsigned int avg_seg_size;
+
+	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
+			blk_rq_nr_phys_segments(req));
+
+	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
+		return false;
+	if (!iod->nvmeq->qid)
+		return false;
+	if (!sgl_threshold || avg_seg_size < sgl_threshold)
+		return false;
+	return true;
+}
+
 static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
 	int nseg = blk_rq_nr_phys_segments(rq);
 	unsigned int size = blk_rq_payload_bytes(rq);
 
+	iod->use_sgl = nvme_pci_use_sgls(dev, rq);
+
 	if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
 		size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
 				iod->use_sgl);
@@ -604,8 +623,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
 	dma_addr_t prp_dma;
 	int nprps, i;
 
-	iod->use_sgl = false;
-
 	length -= (page_size - offset);
 	if (length <= 0) {
 		iod->first_dma = 0;
@@ -715,8 +732,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 	int entries = iod->nents, i = 0;
 	dma_addr_t sgl_dma;
 
-	iod->use_sgl = true;
-
 	/* setting the transfer type as SGL */
 	cmd->flags = NVME_CMD_SGL_METABUF;
 
@@ -770,23 +785,6 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 	return BLK_STS_OK;
 }
 
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
-{
-	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	unsigned int avg_seg_size;
-
-	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req),
-			blk_rq_nr_phys_segments(req));
-
-	if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1))))
-		return false;
-	if (!iod->nvmeq->qid)
-		return false;
-	if (!sgl_threshold || avg_seg_size < sgl_threshold)
-		return false;
-	return true;
-}
-
 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct nvme_command *cmnd)
 {
@@ -806,7 +804,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 			DMA_ATTR_NO_WARN))
 		goto out;
 
-	if (nvme_pci_use_sgls(dev, req))
+	if (iod->use_sgl)
 		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
 	else
 		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -974,12 +974,18 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 	nvme_start_queues(&ctrl->ctrl);
 
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+		/* state change failure should never happen */
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	nvme_rdma_reconnect_or_remove(ctrl);
 }
 
 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
 {
-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING))
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
 		return;
 
 	queue_work(nvme_wq, &ctrl->err_work);
@@ -1753,6 +1759,12 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
 	nvme_stop_ctrl(&ctrl->ctrl);
 	nvme_rdma_shutdown_ctrl(ctrl, false);
 
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+		/* state change failure should never happen */
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	ret = nvme_rdma_configure_admin_queue(ctrl, false);
 	if (ret)
 		goto out_fail;
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1085,7 +1085,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
 		const char *buf, size_t count)
 {
 	struct fcloop_nport *nport = NULL, *tmpport;
-	struct fcloop_tport *tport;
+	struct fcloop_tport *tport = NULL;
 	u64 nodename, portname;
 	unsigned long flags;
 	int ret;
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -236,7 +236,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 			rc = of_mdiobus_register_phy(mdio, child, addr);
 		else
 			rc = of_mdiobus_register_device(mdio, child, addr);
-		if (rc)
+
+		if (rc == -ENODEV)
+			dev_err(&mdio->dev,
+				"MDIO device at address %d is missing.\n",
+				addr);
+		else if (rc)
 			goto unregister;
 	}
 
@@ -260,7 +265,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 
 			if (of_mdiobus_child_is_phy(child)) {
 				rc = of_mdiobus_register_phy(mdio, child, addr);
-				if (rc)
+				if (rc && rc != -ENODEV)
 					goto unregister;
 			}
 		}
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -44,7 +44,14 @@ struct bpf_map_ops {
 };
 
 struct bpf_map {
-	atomic_t refcnt;
+	/* 1st cacheline with read-mostly members of which some
+	 * are also accessed in fast-path (e.g. ops, max_entries).
+	 */
+	const struct bpf_map_ops *ops ____cacheline_aligned;
+	struct bpf_map *inner_map_meta;
+#ifdef CONFIG_SECURITY
+	void *security;
+#endif
 	enum bpf_map_type map_type;
 	u32 key_size;
 	u32 value_size;
@@ -53,15 +60,17 @@ struct bpf_map {
 	u32 pages;
 	u32 id;
 	int numa_node;
-	struct user_struct *user;
-	const struct bpf_map_ops *ops;
-	struct work_struct work;
+	bool unpriv_array;
+	/* 7 bytes hole */
+
+	/* 2nd cacheline with misc members to avoid false sharing
+	 * particularly with refcounting.
+	 */
+	struct user_struct *user ____cacheline_aligned;
+	atomic_t refcnt;
 	atomic_t usercnt;
-	struct bpf_map *inner_map_meta;
+	struct work_struct work;
 	char name[BPF_OBJ_NAME_LEN];
-#ifdef CONFIG_SECURITY
-	void *security;
-#endif
 };
 
 /* function argument constraints */
@@ -230,6 +239,7 @@ struct bpf_prog_aux {
 struct bpf_array {
 	struct bpf_map map;
 	u32 elem_size;
+	u32 index_mask;
 	/* 'ownership' of prog_array is claimed by the first program that
 	 * is going to use this map or by the first program which FD is stored
 	 * in the map to make sure that all callers and callees have the same
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1396,6 +1396,13 @@ config BPF_SYSCALL
 	  Enable the bpf() system call that allows to manipulate eBPF
 	  programs and maps via file descriptors.
 
+config BPF_JIT_ALWAYS_ON
+	bool "Permanently enable BPF JIT and remove BPF interpreter"
+	depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
+	help
+	  Enables BPF JIT and removes BPF interpreter to avoid
+	  speculative execution of BPF instructions by the interpreter
+
 config USERFAULTFD
 	bool "Enable userfaultfd() system call"
 	select ANON_INODES
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -53,9 +53,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 	int numa_node = bpf_map_attr_numa_node(attr);
+	u32 elem_size, index_mask, max_entries;
+	bool unpriv = !capable(CAP_SYS_ADMIN);
 	struct bpf_array *array;
 	u64 array_size;
-	u32 elem_size;
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -72,11 +73,20 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	elem_size = round_up(attr->value_size, 8);
 
+	max_entries = attr->max_entries;
+	index_mask = roundup_pow_of_two(max_entries) - 1;
+
+	if (unpriv)
+		/* round up array size to nearest power of 2,
+		 * since cpu will speculate within index_mask limits
+		 */
+		max_entries = index_mask + 1;
+
 	array_size = sizeof(*array);
 	if (percpu)
-		array_size += (u64) attr->max_entries * sizeof(void *);
+		array_size += (u64) max_entries * sizeof(void *);
 	else
-		array_size += (u64) attr->max_entries * elem_size;
+		array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
 	if (array_size >= U32_MAX - PAGE_SIZE)
@@ -86,6 +96,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array = bpf_map_area_alloc(array_size, numa_node);
 	if (!array)
 		return ERR_PTR(-ENOMEM);
+	array->index_mask = index_mask;
+	array->map.unpriv_array = unpriv;
 
 	/* copy mandatory map attributes */
 	array->map.map_type = attr->map_type;
@@ -121,12 +133,13 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
 	if (unlikely(index >= array->map.max_entries))
 		return NULL;
 
-	return array->value + array->elem_size * index;
+	return array->value + array->elem_size * (index & array->index_mask);
 }
 
 /* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
 static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 {
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct bpf_insn *insn = insn_buf;
 	u32 elem_size = round_up(map->value_size, 8);
 	const int ret = BPF_REG_0;
@@ -135,7 +148,12 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 
 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
-	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+	if (map->unpriv_array) {
+		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
+		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
+	} else {
+		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
+	}
 
 	if (is_power_of_2(elem_size)) {
 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
@@ -157,7 +175,7 @@ static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
 	if (unlikely(index >= array->map.max_entries))
 		return NULL;
 
-	return this_cpu_ptr(array->pptrs[index]);
+	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
 }
 
 int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
@@ -177,7 +195,7 @@ int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
 	 */
 	size = round_up(map->value_size, 8);
 	rcu_read_lock();
-	pptr = array->pptrs[index];
+	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
 		off += size;
@@ -225,10 +243,11 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
 		return -EEXIST;
 
 	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
-		memcpy(this_cpu_ptr(array->pptrs[index]),
+		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
 		       value, map->value_size);
 	else
-		memcpy(array->value + array->elem_size * index,
+		memcpy(array->value +
+		       array->elem_size * (index & array->index_mask),
 		       value, map->value_size);
 	return 0;
 }
@@ -262,7 +281,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 	 */
 	size = round_up(map->value_size, 8);
 	rcu_read_lock();
-	pptr = array->pptrs[index];
+	pptr = array->pptrs[index & array->index_mask];
 	for_each_possible_cpu(cpu) {
 		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
 		off += size;
@@ -613,6 +632,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
 static u32 array_of_map_gen_lookup(struct bpf_map *map,
 				   struct bpf_insn *insn_buf)
 {
+	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	u32 elem_size = round_up(map->value_size, 8);
 	struct bpf_insn *insn = insn_buf;
 	const int ret = BPF_REG_0;
@@ -621,7 +641,12 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
 
 	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
 	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
-	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+	if (map->unpriv_array) {
+		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
+		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
+	} else {
+		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
+	}
 	if (is_power_of_2(elem_size))
 		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
 	else
@ -781,6 +781,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(__bpf_call_base);
|
||||
|
||||
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
|
||||
/**
|
||||
* __bpf_prog_run - run eBPF program on a given context
|
||||
* @ctx: is the data we are operating on
|
||||
|
@ -1377,6 +1378,14 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth)
|
|||
insn->code = BPF_JMP | BPF_CALL_ARGS;
|
||||
}
|
||||
|
||||
#else
|
||||
static unsigned int __bpf_prog_ret0(const void *ctx,
|
||||
const struct bpf_insn *insn)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
bool bpf_prog_array_compatible(struct bpf_array *array,
|
||||
const struct bpf_prog *fp)
|
||||
{
|
||||
|
@ -1427,9 +1436,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);

	fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
#else
	fp->bpf_func = __bpf_prog_ret0;
#endif

	/* eBPF JITs can rewrite the program in case constant
	 * blinding is active. However, in case of error during

@ -1439,6 +1452,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
	 */
	if (!bpf_prog_is_dev_bound(fp->aux)) {
		fp = bpf_int_jit_compile(fp);
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
		if (!fp->jited) {
			*err = -ENOTSUPP;
			return fp;
		}
#endif
	} else {
		*err = bpf_prog_offload_compile(fp);
		if (*err)

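With CONFIG_BPF_JIT_ALWAYS_ON, bpf_prog_select_runtime() no longer silently falls back to the interpreter: bpf_func is pre-pointed at the __bpf_prog_ret0 stub and a JIT failure is reported through *err. A hedged caller-side sketch (the cleanup call is illustrative, not taken from this diff):

	struct bpf_prog *attach_prog(struct bpf_prog *fp)
	{
		int err = 0;

		fp = bpf_prog_select_runtime(fp, &err);
		if (err) {
			/* never let the __bpf_prog_ret0 stub run; fail the load */
			bpf_prog_free(fp);	/* illustrative cleanup */
			return ERR_PTR(err);
		}
		return fp;
	}
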
@ -583,8 +583,15 @@ static void sock_map_free(struct bpf_map *map)

		write_lock_bh(&sock->sk_callback_lock);
		psock = smap_psock_sk(sock);
		smap_list_remove(psock, &stab->sock_map[i]);
		smap_release_sock(psock, sock);
		/* This check handles a racing sock event that can get the
		 * sk_callback_lock before this case but after xchg happens
		 * causing the refcnt to hit zero and sock user data (psock)
		 * to be null and queued for garbage collection.
		 */
		if (likely(psock)) {
			smap_list_remove(psock, &stab->sock_map[i]);
			smap_release_sock(psock, sock);
		}
		write_unlock_bh(&sock->sk_callback_lock);
	}
	rcu_read_unlock();

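A conceptual timeline of the race this hunk closes, sketched as a comment (not from the source):

	/*
	 *   CPU0: sock_map_free()              CPU1: sock event callback
	 *   ---------------------              -------------------------
	 *   sock = xchg(&stab->sock_map[i], NULL)
	 *                                      write_lock_bh(sk_callback_lock)
	 *                                      refcnt drops to zero
	 *                                      psock freed, sk_user_data = NULL
	 *                                      write_unlock_bh(sk_callback_lock)
	 *   write_lock_bh(sk_callback_lock)
	 *   psock = smap_psock_sk(sock)        -> now NULL
	 *   if (likely(psock)) { ... }         -> the guard added above avoids
	 *                                         the NULL dereference
	 */
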
@ -1523,6 +1523,7 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
		goto continue_func;
}

#ifndef CONFIG_BPF_JIT_ALWAYS_ON
static int get_callee_stack_depth(struct bpf_verifier_env *env,
				  const struct bpf_insn *insn, int idx)
{

@ -1537,6 +1538,7 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
	subprog++;
	return env->subprog_stack_depth[subprog];
}
#endif

/* truncate register to smaller size (in bytes)
 * must be called with size < BPF_REG_SIZE

@ -2321,6 +2323,13 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
	if (err)
		return err;
	if (func_id == BPF_FUNC_tail_call) {
		if (meta.map_ptr == NULL) {
			verbose(env, "verifier bug\n");
			return -EINVAL;
		}
		env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
	}
	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
	if (err)
		return err;

@ -5264,14 +5273,20 @@ static int jit_subprogs(struct bpf_verifier_env *env)

static int fixup_call_args(struct bpf_verifier_env *env)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	struct bpf_prog *prog = env->prog;
	struct bpf_insn *insn = prog->insnsi;
	int i, depth;
#endif
	int err;

	if (env->prog->jit_requested)
		if (jit_subprogs(env) == 0)
			err = 0;
	if (env->prog->jit_requested) {
		err = jit_subprogs(env);
		if (err == 0)
			return 0;
	}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	for (i = 0; i < prog->len; i++, insn++) {
		if (insn->code != (BPF_JMP | BPF_CALL) ||
		    insn->src_reg != BPF_PSEUDO_CALL)

@ -5281,7 +5296,9 @@ static int fixup_call_args(struct bpf_verifier_env *env)
			return depth;
		bpf_patch_call_args(insn, depth);
	}
	return 0;
	err = 0;
#endif
	return err;
}

/* fixup insn->imm field of bpf_call instructions

@ -5328,6 +5345,35 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
			 */
			insn->imm = 0;
			insn->code = BPF_JMP | BPF_TAIL_CALL;

			/* instead of changing every JIT dealing with tail_call
			 * emit two extra insns:
			 * if (index >= max_entries) goto out;
			 * index &= array->index_mask;
			 * to avoid out-of-bounds cpu speculation
			 */
			map_ptr = env->insn_aux_data[i + delta].map_ptr;
			if (map_ptr == BPF_MAP_PTR_POISON) {
				verbose(env, "tail_call abusing map_ptr\n");
				return -EINVAL;
			}
			if (!map_ptr->unpriv_array)
				continue;
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
						    container_of(map_ptr,
								 struct bpf_array,
								 map)->index_mask);
			insn_buf[2] = *insn;
			cnt = 3;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta += cnt - 1;
			env->prog = prog = new_prog;
			insn = new_prog->insnsi + i + delta;
			continue;
		}

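In C terms, the two instructions patched in ahead of each tail call on unprivileged arrays behave roughly like this (illustrative expansion; BPF_REG_3 carries the index at this point):

	if (index >= map->max_entries)
		goto out;			/* insn_buf[0]: tail call not taken */
	index &= array->index_mask;		/* insn_buf[1]: clamp speculation   */
	/* insn_buf[2]: the original tail-call instruction then runs with the
	 * masked index, so a speculative out-of-bounds fetch of a prog
	 * pointer can no longer happen. */
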
@ -6250,9 +6250,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
			return NULL;
		}
	}
	/* We don't expect to fail. */
	if (*err) {
		pr_cont("FAIL to attach err=%d len=%d\n",
		pr_cont("FAIL to prog_create err=%d len=%d\n",
			*err, fprog.len);
		return NULL;
	}

@ -6276,6 +6275,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
	 * checks.
	 */
	fp = bpf_prog_select_runtime(fp, err);
	if (*err) {
		pr_cont("FAIL to select_runtime err=%d\n", *err);
		return NULL;
	}
	break;
}

@ -6461,8 +6464,8 @@ static __init int test_bpf(void)
			pass_cnt++;
			continue;
		}

		return err;
		err_cnt++;
		continue;
	}

	pr_cont("jited:%u ", fp->jited);

@ -111,12 +111,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
		vlan_gvrp_uninit_applicant(real_dev);
	}

	/* Take it out of our own structures, but be sure to interlock with
	 * HW accelerating devices or SW vlan input packet processing if
	 * VLAN is not 0 (leave it there for 802.1p).
	 */
	if (vlan_id)
		vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);

@ -334,9 +334,8 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strncpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name) - 1);
	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
	strlcpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name));
	caifd->layer.transmit = transmit;
	cfcnfg_add_phy_layer(cfg,
			     dev,

@ -176,9 +176,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
		dev_add_pack(&caif_usb_type);
		pack_added = true;

	strncpy(layer->name, dev->name,
		sizeof(layer->name) - 1);
	layer->name[sizeof(layer->name) - 1] = 0;
	strlcpy(layer->name, dev->name, sizeof(layer->name));

	return 0;
}

@ -268,17 +268,15 @@ static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
	case CAIFPROTO_RFM:
		l->linktype = CFCTRL_SRV_RFM;
		l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
		strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
			sizeof(l->u.rfm.volume)-1);
		l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
		strlcpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
			sizeof(l->u.rfm.volume));
		break;
	case CAIFPROTO_UTIL:
		l->linktype = CFCTRL_SRV_UTIL;
		l->endpoint = 0x00;
		l->chtype = 0x00;
		strncpy(l->u.utility.name, s->sockaddr.u.util.service,
			sizeof(l->u.utility.name)-1);
		l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
		strlcpy(l->u.utility.name, s->sockaddr.u.util.service,
			sizeof(l->u.utility.name));
		caif_assert(sizeof(l->u.utility.name) > 10);
		l->u.utility.paramlen = s->param.size;
		if (l->u.utility.paramlen > sizeof(l->u.utility.params))

@ -258,8 +258,8 @@ int cfctrl_linkup_request(struct cflayer *layer,
		tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
		cfpkt_add_body(pkt, &tmp16, 2);
		memset(utility_name, 0, sizeof(utility_name));
		strncpy(utility_name, param->u.utility.name,
			UTILITY_NAME_LENGTH - 1);
		strlcpy(utility_name, param->u.utility.name,
			UTILITY_NAME_LENGTH);
		cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
		tmp8 = param->u.utility.paramlen;
		cfpkt_add_body(pkt, &tmp8, 1);

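All the CAIF hunks retire the same three-line pattern. strncpy() does not NUL-terminate when the source fills the buffer, hence the manual terminator; strlcpy() always terminates. A small user-space illustration (assumes a strlcpy() implementation is available, as in the kernel or on BSDs):

	#include <string.h>

	#define NAME_LEN 16

	void copy_old(char dst[NAME_LEN], const char *src)
	{
		strncpy(dst, src, NAME_LEN - 1);	/* may leave dst unterminated */
		dst[NAME_LEN - 1] = '\0';		/* so terminate by hand */
	}

	void copy_new(char dst[NAME_LEN], const char *src)
	{
		strlcpy(dst, src, NAME_LEN);		/* always NUL-terminates */
	}
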
@ -1054,11 +1054,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
		 */
		goto out_err_free;

	/* We are guaranteed to never error here with cBPF to eBPF
	 * transitions, since there's no issue with type compatibility
	 * checks on program arrays.
	 */
	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

@ -288,7 +288,7 @@ static int sock_diag_bind(struct net *net, int group)
	case SKNLGRP_INET6_UDP_DESTROY:
		if (!sock_diag_handlers[AF_INET6])
			request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
				       NETLINK_SOCK_DIAG, AF_INET);
				       NETLINK_SOCK_DIAG, AF_INET6);
		break;
	}
	return 0;

@ -325,7 +325,13 @@ static struct ctl_table net_core_table[] = {
		.data = &bpf_jit_enable,
		.maxlen = sizeof(int),
		.mode = 0644,
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
		.proc_handler = proc_dointvec
#else
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &one,
		.extra2 = &one,
#endif
	},
# ifdef CONFIG_HAVE_EBPF_JIT
	{

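With .extra1 and .extra2 both pointing at the constant one, proc_dointvec_minmax() rejects any write outside [1, 1], so once the kernel is built with CONFIG_BPF_JIT_ALWAYS_ON the JIT cannot be switched off through /proc/sys/net/core/bpf_jit_enable. The check it performs amounts to this sketch:

	/* Sketch of the range check applied on write: */
	static int bpf_jit_enable_write(int new_value)
	{
		if (new_value < 1 || new_value > 1)	/* *extra1 .. *extra2 */
			return -EINVAL;			/* "echo 0" now fails */
		return 0;
	}
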
@ -520,9 +520,11 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
		goto out;

	/* hdrincl should be READ_ONCE(inet->hdrincl)
	 * but READ_ONCE() doesn't work with bit fields
	 * but READ_ONCE() doesn't work with bit fields.
	 * Doing this indirectly yields the same result.
	 */
	hdrincl = inet->hdrincl;
	hdrincl = READ_ONCE(hdrincl);
	/*
	 * Check the flags.
	 */

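The indirection is needed because READ_ONCE() requires an lvalue it can cast and dereference, and C does not allow taking the address of a bit field. Sketched in isolation (the struct layout is illustrative):

	struct inet_flags {
		unsigned int hdrincl:1;		/* bit field: &x->hdrincl is invalid */
	};

	static int read_hdrincl(struct inet_flags *x)
	{
		int hdrincl;

		hdrincl = x->hdrincl;		/* copy the bit field out once */
		hdrincl = READ_ONCE(hdrincl);	/* pin that single read so the
						 * compiler cannot re-load
						 * x->hdrincl later and observe
						 * a racing update */
		return hdrincl;
	}
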
@ -925,6 +925,15 @@ static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto,
	sr_phdr->segments[0] = **addr_p;
	*addr_p = &sr_ihdr->segments[sr_ihdr->segments_left];

	if (sr_ihdr->hdrlen > hops * 2) {
		int tlvs_offset, tlvs_length;

		tlvs_offset = (1 + hops * 2) << 3;
		tlvs_length = (sr_ihdr->hdrlen - hops * 2) << 3;
		memcpy((char *)sr_phdr + tlvs_offset,
		       (char *)sr_ihdr + tlvs_offset, tlvs_length);
	}

#ifdef CONFIG_IPV6_SEG6_HMAC
	if (sr_has_hmac(sr_phdr)) {
		struct net *net = NULL;

@ -637,6 +637,11 @@ static struct fib6_node *fib6_add_1(struct net *net,
		if (!(fn->fn_flags & RTN_RTINFO)) {
			RCU_INIT_POINTER(fn->leaf, NULL);
			rt6_release(leaf);
		/* remove null_entry in the root node */
		} else if (fn->fn_flags & RTN_TL_ROOT &&
			   rcu_access_pointer(fn->leaf) ==
			   net->ipv6.ip6_null_entry) {
			RCU_INIT_POINTER(fn->leaf, NULL);
		}

	return fn;

@ -1267,13 +1272,17 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
	return err;

failure:
	/* fn->leaf could be NULL if fn is an intermediate node and we
	 * failed to add the new route to it in both subtree creation
	 * failure and fib6_add_rt2node() failure case.
	 * In both cases, fib6_repair_tree() should be called to fix
	 * fn->leaf.
	/* fn->leaf could be NULL and fib6_repair_tree() needs to be called if:
	 * 1. fn is an intermediate node and we failed to add the new
	 * route to it in both subtree creation failure and fib6_add_rt2node()
	 * failure case.
	 * 2. fn is the root node in the table and we fail to add the first
	 * default route to it.
	 */
	if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
	if (fn &&
	    (!(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)) ||
	     (fn->fn_flags & RTN_TL_ROOT &&
	      !rcu_access_pointer(fn->leaf))))
		fib6_repair_tree(info->nl_net, table, fn);
	/* Always release dst as dst->__refcnt is guaranteed
	 * to be taken before entering this function

@ -1528,6 +1537,12 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
	struct fib6_walker *w;
	int iter = 0;

	/* Set fn->leaf to null_entry for root node. */
	if (fn->fn_flags & RTN_TL_ROOT) {
		rcu_assign_pointer(fn->leaf, net->ipv6.ip6_null_entry);
		return fn;
	}

	for (;;) {
		struct fib6_node *fn_r = rcu_dereference_protected(fn->right,
					lockdep_is_held(&table->tb6_lock));

@ -1683,10 +1698,15 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn,
	}
	read_unlock(&net->ipv6.fib6_walker_lock);

	/* If it was last route, expunge its radix tree node */
	/* If it was last route, call fib6_repair_tree() to:
	 * 1. For root node, put back null_entry as how the table was created.
	 * 2. For other nodes, expunge its radix tree node.
	 */
	if (!rcu_access_pointer(fn->leaf)) {
		fn->fn_flags &= ~RTN_RTINFO;
		net->ipv6.rt6_stats->fib_route_nodes--;
		if (!(fn->fn_flags & RTN_TL_ROOT)) {
			fn->fn_flags &= ~RTN_RTINFO;
			net->ipv6.rt6_stats->fib_route_nodes--;
		}
		fn = fib6_repair_tree(net, table, fn);
	}

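All three ip6_fib hunks serve one invariant: a table's root node always keeps a leaf, with net->ipv6.ip6_null_entry standing in once the last real route is gone, instead of the root being expunged like an ordinary intermediate node. Reduced to its core:

	/* Invariant: the root fib6_node is repaired, never removed. */
	if (fn->fn_flags & RTN_TL_ROOT) {
		rcu_assign_pointer(fn->leaf, net->ipv6.ip6_null_entry);
		return fn;	/* table keeps a valid (null) default leaf */
	}
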
@ -1744,9 +1744,10 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
	cork.base.opt = NULL;
	v6_cork.opt = NULL;
	err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
	if (err)
	if (err) {
		ip6_cork_release(&cork, &v6_cork);
		return ERR_PTR(err);
	}
	if (ipc6->dontfrag < 0)
		ipc6->dontfrag = inet6_sk(sk)->dontfrag;

@ -2307,7 +2307,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,

	if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
		event = sctp_ulpevent_make_sender_dry_event(asoc,
				GFP_ATOMIC);
				GFP_USER | __GFP_NOWARN);
		if (!event)
			return -ENOMEM;

@ -3531,6 +3531,8 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,

	if (optlen < sizeof(struct sctp_hmacalgo))
		return -EINVAL;
	optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
					     SCTP_AUTH_NUM_HMACS * sizeof(u16));

	hmacs = memdup_user(optval, optlen);
	if (IS_ERR(hmacs))

@ -3569,6 +3571,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,

	if (optlen <= sizeof(struct sctp_authkey))
		return -EINVAL;
	/* authkey->sca_keylength is u16, so optlen can't be bigger than
	 * this.
	 */
	optlen = min_t(unsigned int, optlen, USHRT_MAX +
					     sizeof(struct sctp_authkey));

	authkey = memdup_user(optval, optlen);
	if (IS_ERR(authkey))

@ -3926,6 +3933,9 @@ static int sctp_setsockopt_reset_streams(struct sock *sk,

	if (optlen < sizeof(*params))
		return -EINVAL;
	/* srs_number_streams is u16, so optlen can't be bigger than this. */
	optlen = min_t(unsigned int, optlen, USHRT_MAX +
					     sizeof(__u16) * sizeof(*params));

	params = memdup_user(optval, optlen);
	if (IS_ERR(params))

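These three sctp hunks are one pattern: the length field inside each option struct is a u16, so nothing beyond USHRT_MAX plus the fixed header can ever be meaningful, and clamping optlen before memdup_user() caps the kernel allocation that a hostile optlen could otherwise drive. A generic sketch with hypothetical names:

	/* Hypothetical option struct, for illustration only. */
	struct my_opt {
		__u16 len;	/* u16 length field ...                       */
		char data[];	/* ... so payload can never exceed USHRT_MAX  */
	};

	static int my_setsockopt(char __user *optval, unsigned int optlen)
	{
		struct my_opt *opt;

		if (optlen < sizeof(*opt))
			return -EINVAL;
		/* Clamp before copying: a multi-megabyte optlen from user
		 * space would otherwise become an equally large allocation. */
		optlen = min_t(unsigned int, optlen, USHRT_MAX + sizeof(*opt));

		opt = memdup_user(optval, optlen);
		if (IS_ERR(opt))
			return PTR_ERR(opt);
		/* ... validate opt->len against optlen, then use the data ... */
		kfree(opt);
		return 0;
	}
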
@ -5075,7 +5085,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
	len = sizeof(int);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
		return -EFAULT;
	return 0;
}

@ -5705,6 +5715,9 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
		err = -EFAULT;
		goto out;
	}
	/* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
	 * but we can't change it anymore.
	 */
	if (put_user(bytes_copied, optlen))
		err = -EFAULT;
out:

@ -6141,7 +6154,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
		params.assoc_id = 0;
	} else if (len >= sizeof(struct sctp_assoc_value)) {
		len = sizeof(struct sctp_assoc_value);
		if (copy_from_user(&params, optval, sizeof(params)))
		if (copy_from_user(&params, optval, len))
			return -EFAULT;
	} else
		return -EINVAL;

@ -6311,7 +6324,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,

	if (len < sizeof(struct sctp_authkeyid))
		return -EINVAL;
	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))

	len = sizeof(struct sctp_authkeyid);
	if (copy_from_user(&val, optval, len))
		return -EFAULT;

	asoc = sctp_id2assoc(sk, val.scact_assoc_id);

@ -6323,7 +6338,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
	else
		val.scact_keynumber = ep->active_key_id;

	len = sizeof(struct sctp_authkeyid);
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))

@ -6349,7 +6363,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
	if (copy_from_user(&val, optval, sizeof(val)))
		return -EFAULT;

	to = p->gauth_chunks;

@ -6394,7 +6408,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
	if (len < sizeof(struct sctp_authchunks))
		return -EINVAL;

	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
	if (copy_from_user(&val, optval, sizeof(val)))
		return -EFAULT;

	to = p->gauth_chunks;

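The getsockopt side is the mirror image: clamp len once to the size actually returned, then use that same len for every copy in either direction, so a short user buffer is never overrun and no kernel bytes beyond the structure leak out. Schematically (hypothetical helper):

	static int my_getsockopt(int len, char __user *optval, int __user *optlen)
	{
		int val = 0;	/* placeholder result */

		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);			/* clamp once ...        */
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &val, len))	/* ... and reuse it here */
			return -EFAULT;
		return 0;
	}
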
net/socket.c
@ -430,8 +430,10 @@ static int sock_map_fd(struct socket *sock, int flags)
{
	struct file *newfile;
	int fd = get_unused_fd_flags(flags);
	if (unlikely(fd < 0))
	if (unlikely(fd < 0)) {
		sock_release(sock);
		return fd;
	}

	newfile = sock_alloc_file(sock, flags, NULL);
	if (likely(!IS_ERR(newfile))) {

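The sock_map_fd() change is an ownership fix: on every failure path the function now disposes of the socket itself, so callers cannot leak it by guessing wrong. The shape of the contract (a sketch, not the full function):

	/* On success the fd table owns the socket; on failure this function
	 * releases it before returning, so the caller never has to. */
	static int map_fd_or_release(struct socket *sock, int flags)
	{
		int fd = get_unused_fd_flags(flags);

		if (unlikely(fd < 0)) {
			sock_release(sock);	/* previously leaked on this path */
			return fd;
		}
		/* ... sock_alloc_file() and fd_install() as in the hunk above ... */
		return fd;
	}
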
@ -2611,6 +2613,15 @@ static int __init sock_init(void)

core_initcall(sock_init);	/* early initcall */

static int __init jit_init(void)
{
#ifdef CONFIG_BPF_JIT_ALWAYS_ON
	bpf_jit_enable = 1;
#endif
	return 0;
}
pure_initcall(jit_init);

#ifdef CONFIG_PROC_FS
void socket_seq_show(struct seq_file *seq)
{

@ -455,7 +455,6 @@ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm,
		v = snd_pcm_hw_param_last(pcm, params, var, dir);
	else
		v = snd_pcm_hw_param_first(pcm, params, var, dir);
	snd_BUG_ON(v < 0);
	return v;
}

@ -1335,8 +1334,11 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha

	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
		return tmp;
	mutex_lock(&runtime->oss.params_lock);
	while (bytes > 0) {
		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
			tmp = -ERESTARTSYS;
			break;
		}
		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
			tmp = bytes;
			if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)

@ -1380,14 +1382,18 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
			xfer += tmp;
			if ((substream->f_flags & O_NONBLOCK) != 0 &&
			    tmp != runtime->oss.period_bytes)
				break;
				tmp = -EAGAIN;
		}
	}
	mutex_unlock(&runtime->oss.params_lock);
	return xfer;

 err:
	mutex_unlock(&runtime->oss.params_lock);
		mutex_unlock(&runtime->oss.params_lock);
		if (tmp < 0)
			break;
		if (signal_pending(current)) {
			tmp = -ERESTARTSYS;
			break;
		}
		tmp = 0;
	}
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}

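Taking the params lock inside the loop, and interruptibly, lets a pending signal abort a writer stuck waiting for the lock instead of blocking it indefinitely; -ERESTARTSYS tells the syscall layer to restart the call once the signal is handled. The idiom in isolation:

	while (bytes > 0) {
		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
			tmp = -ERESTARTSYS;	/* a signal arrived while waiting */
			break;
		}
		/* ... transfer one chunk under the lock ... */
		mutex_unlock(&runtime->oss.params_lock);
		if (signal_pending(current)) {
			tmp = -ERESTARTSYS;
			break;
		}
	}
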
@ -1435,8 +1441,11 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use

	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
		return tmp;
	mutex_lock(&runtime->oss.params_lock);
	while (bytes > 0) {
		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
			tmp = -ERESTARTSYS;
			break;
		}
		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
			if (runtime->oss.buffer_used == 0) {
				tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);

@ -1467,12 +1476,16 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
			bytes -= tmp;
			xfer += tmp;
		}
	}
	mutex_unlock(&runtime->oss.params_lock);
	return xfer;

 err:
	mutex_unlock(&runtime->oss.params_lock);
		mutex_unlock(&runtime->oss.params_lock);
		if (tmp < 0)
			break;
		if (signal_pending(current)) {
			tmp = -ERESTARTSYS;
			break;
		}
		tmp = 0;
	}
	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
}

|
@ -592,18 +592,26 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
|
|||
snd_pcm_sframes_t frames = size;
|
||||
|
||||
plugin = snd_pcm_plug_first(plug);
|
||||
while (plugin && frames > 0) {
|
||||
while (plugin) {
|
||||
if (frames <= 0)
|
||||
return frames;
|
||||
if ((next = plugin->next) != NULL) {
|
||||
snd_pcm_sframes_t frames1 = frames;
|
||||
if (plugin->dst_frames)
|
||||
if (plugin->dst_frames) {
|
||||
frames1 = plugin->dst_frames(plugin, frames);
|
||||
if (frames1 <= 0)
|
||||
return frames1;
|
||||
}
|
||||
if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) {
|
||||
return err;
|
||||
}
|
||||
if (err != frames1) {
|
||||
frames = err;
|
||||
if (plugin->src_frames)
|
||||
if (plugin->src_frames) {
|
||||
frames = plugin->src_frames(plugin, frames1);
|
||||
if (frames <= 0)
|
||||
return frames;
|
||||
}
|
||||
}
|
||||
} else
|
||||
dst_channels = NULL;
|
||||
|
|
|
@ -1632,7 +1632,7 @@ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (snd_BUG_ON(err < 0))
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);

@ -1678,7 +1678,7 @@ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
		return changed;
	if (params->rmask) {
		int err = snd_pcm_hw_refine(pcm, params);
		if (snd_BUG_ON(err < 0))
		if (err < 0)
			return err;
	}
	return snd_pcm_hw_param_value(params, var, dir);

@ -2580,7 +2580,7 @@ static snd_pcm_sframes_t forward_appl_ptr(struct snd_pcm_substream *substream,
	return ret < 0 ? ret : frames;
}

/* decrease the appl_ptr; returns the processed frames or a negative error */
/* decrease the appl_ptr; returns the processed frames or zero for error */
static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
					 snd_pcm_uframes_t frames,
					 snd_pcm_sframes_t avail)

@ -2597,7 +2597,12 @@ static snd_pcm_sframes_t rewind_appl_ptr(struct snd_pcm_substream *substream,
	if (appl_ptr < 0)
		appl_ptr += runtime->boundary;
	ret = pcm_lib_apply_appl_ptr(substream, appl_ptr);
	return ret < 0 ? ret : frames;
	/* NOTE: we return zero for errors because PulseAudio gets depressed
	 * upon receiving an error from rewind ioctl and stops processing
	 * any longer.  Returning zero means that no rewind is done, so
	 * it's not absolutely wrong to answer like that.
	 */
	return ret < 0 ? 0 : frames;
}

static snd_pcm_sframes_t snd_pcm_playback_rewind(struct snd_pcm_substream *substream,

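For clients the contract becomes: rewind reports how much was actually rewound, and 0 now also stands in for "nothing, including on error". A hedged alsa-lib caller sketch (alsa-lib's snd_pcm_rewind() may still return negative errors of its own):

	snd_pcm_sframes_t rewound = snd_pcm_rewind(pcm, frames);
	if (rewound <= 0)
		rewound = 0;	/* nothing was rewound; write fresh data instead */
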
@ -39,6 +39,7 @@
#include <sound/core.h>
#include <sound/control.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/info.h>
#include <sound/initval.h>

@ -305,19 +306,6 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
	return 0;
}

static void params_change_substream(struct loopback_pcm *dpcm,
				    struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_runtime *dst_runtime;

	if (dpcm == NULL || dpcm->substream == NULL)
		return;
	dst_runtime = dpcm->substream->runtime;
	if (dst_runtime == NULL)
		return;
	dst_runtime->hw = dpcm->cable->hw;
}

static void params_change(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

@ -329,10 +317,6 @@ static void params_change(struct snd_pcm_substream *substream)
	cable->hw.rate_max = runtime->rate;
	cable->hw.channels_min = runtime->channels;
	cable->hw.channels_max = runtime->channels;
	params_change_substream(cable->streams[SNDRV_PCM_STREAM_PLAYBACK],
				runtime);
	params_change_substream(cable->streams[SNDRV_PCM_STREAM_CAPTURE],
				runtime);
}

static int loopback_prepare(struct snd_pcm_substream *substream)

@ -620,26 +604,29 @@ static unsigned int get_cable_index(struct snd_pcm_substream *substream)
static int rule_format(struct snd_pcm_hw_params *params,
		       struct snd_pcm_hw_rule *rule)
{
	struct loopback_pcm *dpcm = rule->private;
	struct loopback_cable *cable = dpcm->cable;
	struct snd_mask m;

	struct snd_pcm_hardware *hw = rule->private;
	struct snd_mask *maskp = hw_param_mask(params, rule->var);

	maskp->bits[0] &= (u_int32_t)hw->formats;
	maskp->bits[1] &= (u_int32_t)(hw->formats >> 32);
	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
	if (! maskp->bits[0] && ! maskp->bits[1])
		return -EINVAL;
	return 0;
	snd_mask_none(&m);
	mutex_lock(&dpcm->loopback->cable_lock);
	m.bits[0] = (u_int32_t)cable->hw.formats;
	m.bits[1] = (u_int32_t)(cable->hw.formats >> 32);
	mutex_unlock(&dpcm->loopback->cable_lock);
	return snd_mask_refine(hw_param_mask(params, rule->var), &m);
}

static int rule_rate(struct snd_pcm_hw_params *params,
		     struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hardware *hw = rule->private;
	struct loopback_pcm *dpcm = rule->private;
	struct loopback_cable *cable = dpcm->cable;
	struct snd_interval t;

	t.min = hw->rate_min;
	t.max = hw->rate_max;
	mutex_lock(&dpcm->loopback->cable_lock);
	t.min = cable->hw.rate_min;
	t.max = cable->hw.rate_max;
	mutex_unlock(&dpcm->loopback->cable_lock);
	t.openmin = t.openmax = 0;
	t.integer = 0;
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);

@ -648,22 +635,44 @@ static int rule_rate(struct snd_pcm_hw_params *params,
static int rule_channels(struct snd_pcm_hw_params *params,
			 struct snd_pcm_hw_rule *rule)
{
	struct snd_pcm_hardware *hw = rule->private;
	struct loopback_pcm *dpcm = rule->private;
	struct loopback_cable *cable = dpcm->cable;
	struct snd_interval t;

	t.min = hw->channels_min;
	t.max = hw->channels_max;
	mutex_lock(&dpcm->loopback->cable_lock);
	t.min = cable->hw.channels_min;
	t.max = cable->hw.channels_max;
	mutex_unlock(&dpcm->loopback->cable_lock);
	t.openmin = t.openmax = 0;
	t.integer = 0;
	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
}

static void free_cable(struct snd_pcm_substream *substream)
{
	struct loopback *loopback = substream->private_data;
	int dev = get_cable_index(substream);
	struct loopback_cable *cable;

	cable = loopback->cables[substream->number][dev];
	if (!cable)
		return;
	if (cable->streams[!substream->stream]) {
		/* other stream is still alive */
		cable->streams[substream->stream] = NULL;
	} else {
		/* free the cable */
		loopback->cables[substream->number][dev] = NULL;
		kfree(cable);
	}
}

static int loopback_open(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct loopback *loopback = substream->private_data;
	struct loopback_pcm *dpcm;
	struct loopback_cable *cable;
	struct loopback_cable *cable = NULL;
	int err = 0;
	int dev = get_cable_index(substream);

@ -681,7 +690,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
	if (!cable) {
		cable = kzalloc(sizeof(*cable), GFP_KERNEL);
		if (!cable) {
			kfree(dpcm);
			err = -ENOMEM;
			goto unlock;
		}

@ -699,19 +707,19 @@ static int loopback_open(struct snd_pcm_substream *substream)
	/* are cached -> they do not reflect the actual state */
	err = snd_pcm_hw_rule_add(runtime, 0,
				  SNDRV_PCM_HW_PARAM_FORMAT,
				  rule_format, &runtime->hw,
				  rule_format, dpcm,
				  SNDRV_PCM_HW_PARAM_FORMAT, -1);
	if (err < 0)
		goto unlock;
	err = snd_pcm_hw_rule_add(runtime, 0,
				  SNDRV_PCM_HW_PARAM_RATE,
				  rule_rate, &runtime->hw,
				  rule_rate, dpcm,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto unlock;
	err = snd_pcm_hw_rule_add(runtime, 0,
				  SNDRV_PCM_HW_PARAM_CHANNELS,
				  rule_channels, &runtime->hw,
				  rule_channels, dpcm,
				  SNDRV_PCM_HW_PARAM_CHANNELS, -1);
	if (err < 0)
		goto unlock;

@ -723,6 +731,10 @@ static int loopback_open(struct snd_pcm_substream *substream)
	else
		runtime->hw = cable->hw;
unlock:
	if (err < 0) {
		free_cable(substream);
		kfree(dpcm);
	}
	mutex_unlock(&loopback->cable_lock);
	return err;
}

@ -731,20 +743,10 @@ static int loopback_close(struct snd_pcm_substream *substream)
{
	struct loopback *loopback = substream->private_data;
	struct loopback_pcm *dpcm = substream->runtime->private_data;
	struct loopback_cable *cable;
	int dev = get_cable_index(substream);

	loopback_timer_stop(dpcm);
	mutex_lock(&loopback->cable_lock);
	cable = loopback->cables[substream->number][dev];
	if (cable->streams[!substream->stream]) {
		/* other stream is still alive */
		cable->streams[substream->stream] = NULL;
	} else {
		/* free the cable */
		loopback->cables[substream->number][dev] = NULL;
		kfree(cable);
	}
	free_cable(substream);
	mutex_unlock(&loopback->cable_lock);
	return 0;
}

@ -474,27 +474,7 @@ static struct bpf_align_test tests[] = {
		.result = REJECT,
		.matches = {
			{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
			/* ptr & 0x40 == either 0 or 0x40 */
			{5, "R5_w=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
			/* ptr << 2 == unknown, (4n) */
			{7, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
			/* (4n) + 14 == (4n+2). We blow our bounds, because
			 * the add could overflow.
			 */
			{8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s>=0 */
			{10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* packet pointer + nonnegative (4n+2) */
			{12, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			{14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
			 * We checked the bounds, but it might have been able
			 * to overflow if the packet pointer started in the
			 * upper half of the address space.
			 * So we did not get a 'range' on R6, and the access
			 * attempt will fail.
			 */
			{16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* R5 bitwise operator &= on pointer prohibited */
		}
	},
	{