mirror of https://gitee.com/openkylin/linux.git

Merge branch 'for-linus' into work.misc

commit 6108209c4a
@@ -40,18 +40,18 @@ Optional properties:
Slave Properties:
Required properties:
- phy_id : Specifies slave phy id
- phy-mode : See ethernet.txt file in the same directory

Optional properties:
- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
- mac-address : See ethernet.txt file in the same directory
- phy_id : Specifies slave phy id
- phy-handle : See ethernet.txt file in the same directory

Slave sub-nodes:
- fixed-link : See fixed-link.txt file in the same directory
Either the properties phy_id and phy-mode,
or the sub-node fixed-link can be specified
Either the property phy_id, or the sub-node
fixed-link can be specified

Note: "ti,hwmods" field is used to fetch the base address and irq
resources from TI, omap hwmod data base during device registration.
Makefile

@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Blurry Fish Butt

# *DOCUMENTATION*
@@ -193,15 +193,44 @@ struct oabi_flock64 {
pid_t l_pid;
} __attribute__ ((packed,aligned(4)));

static long do_locks(unsigned int fd, unsigned int cmd,
unsigned long arg)
{
struct flock64 kernel;
struct oabi_flock64 user;
mm_segment_t fs;
long ret;

if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
sizeof(user)))
return -EFAULT;
kernel.l_type = user.l_type;
kernel.l_whence = user.l_whence;
kernel.l_start = user.l_start;
kernel.l_len = user.l_len;
kernel.l_pid = user.l_pid;

fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_fcntl64(fd, cmd, (unsigned long)&kernel);
set_fs(fs);

if (!ret && (cmd == F_GETLK64 || cmd == F_OFD_GETLK)) {
user.l_type = kernel.l_type;
user.l_whence = kernel.l_whence;
user.l_start = kernel.l_start;
user.l_len = kernel.l_len;
user.l_pid = kernel.l_pid;
if (copy_to_user((struct oabi_flock64 __user *)arg,
&user, sizeof(user)))
ret = -EFAULT;
}
return ret;
}

asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
unsigned long arg)
{
struct oabi_flock64 user;
struct flock64 kernel;
mm_segment_t fs = USER_DS; /* initialized to kill a warning */
unsigned long local_arg = arg;
int ret;

switch (cmd) {
case F_OFD_GETLK:
case F_OFD_SETLK:

@@ -209,39 +238,11 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
case F_GETLK64:
case F_SETLK64:
case F_SETLKW64:
if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
sizeof(user)))
return -EFAULT;
kernel.l_type = user.l_type;
kernel.l_whence = user.l_whence;
kernel.l_start = user.l_start;
kernel.l_len = user.l_len;
kernel.l_pid = user.l_pid;
local_arg = (unsigned long)&kernel;
fs = get_fs();
set_fs(KERNEL_DS);
return do_locks(fd, cmd, arg);

default:
return sys_fcntl64(fd, cmd, arg);
}

ret = sys_fcntl64(fd, cmd, local_arg);

switch (cmd) {
case F_GETLK64:
if (!ret) {
user.l_type = kernel.l_type;
user.l_whence = kernel.l_whence;
user.l_start = kernel.l_start;
user.l_len = kernel.l_len;
user.l_pid = kernel.l_pid;
if (copy_to_user((struct oabi_flock64 __user *)arg,
&user, sizeof(user)))
ret = -EFAULT;
}
case F_SETLK64:
case F_SETLKW64:
set_fs(fs);
}

return ret;
}

struct oabi_epoll_event {
@@ -3,6 +3,7 @@ generic-y += clkdev.h
generic-y += cputime.h
generic-y += exec.h
generic-y += irq_work.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
generic-y += module.h
@@ -168,13 +168,21 @@ static inline void _writel(unsigned long l, unsigned long addr)
#define writew_relaxed writew
#define writel_relaxed writel

#define ioread8 read
#define ioread8 readb
#define ioread16 readw
#define ioread32 readl
#define iowrite8 writeb
#define iowrite16 writew
#define iowrite32 writel

#define ioread8_rep(p, dst, count) insb((unsigned long)(p), (dst), (count))
#define ioread16_rep(p, dst, count) insw((unsigned long)(p), (dst), (count))
#define ioread32_rep(p, dst, count) insl((unsigned long)(p), (dst), (count))

#define iowrite8_rep(p, src, count) outsb((unsigned long)(p), (src), (count))
#define iowrite16_rep(p, src, count) outsw((unsigned long)(p), (src), (count))
#define iowrite32_rep(p, src, count) outsl((unsigned long)(p), (src), (count))

#define ioread16be(addr) be16_to_cpu(readw(addr))
#define ioread32be(addr) be32_to_cpu(readl(addr))
#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
@@ -26,7 +26,7 @@ aflags-vdso := $(ccflags-vdso) \
# the comments on that file.
#
ifndef CONFIG_CPU_MIPSR6
ifeq ($(call ld-ifversion, -lt, 22500000, y),)
ifeq ($(call ld-ifversion, -lt, 22500000, y),y)
$(warning MIPS VDSO requires binutils >= 2.25)
obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y))
ccflags-vdso += -DDISABLE_MIPS_VDSO
@@ -418,8 +418,12 @@
#define __NR_execveat 350
#define __NR_membarrier 351
#define __NR_userfaultfd 352
#define __NR_bind 353
#define __NR_listen 354
#define __NR_setsockopt 355
#define __NR_mlock2 356

#define NR_syscalls 353
#define NR_syscalls 357

/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
@@ -35,18 +35,18 @@ sys_call_table:
/*80*/ .long sys_setgroups16, sys_getpgrp, sys_setgroups, sys_setitimer, sys_ftruncate64
/*85*/ .long sys_swapon, sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
/*90*/ .long sys_dup2, sys_setfsuid, sys_fcntl, sys_select, sys_setfsgid
/*95*/ .long sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
/*95*/ .long sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
/*100*/ .long sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
/*105*/ .long sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
/*110*/ .long sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
/*115*/ .long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_nis_syscall, sys_getcwd
/*110*/ .long sys_setresgid, sys_getresgid, sys_setregid, sys_recvmsg, sys_sendmsg
/*115*/ .long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd
/*120*/ .long sys_readv, sys_writev, sys_settimeofday, sys_fchown16, sys_fchmod
/*125*/ .long sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
/*135*/ .long sys_nis_syscall, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
/*140*/ .long sys_sendfile64, sys_nis_syscall, sys_futex, sys_gettid, sys_getrlimit
/*125*/ .long sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_sendto, sys_shutdown
/*135*/ .long sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
/*140*/ .long sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
/*145*/ .long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/ .long sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
/*150*/ .long sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
/*155*/ .long sys_fcntl64, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount
/*160*/ .long sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
/*165*/ .long sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
@@ -87,4 +87,5 @@ sys_call_table:
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
/*355*/ .long sys_setsockopt, sys_mlock2
@@ -37,15 +37,15 @@ sys_call_table32:
/*80*/ .word sys_setgroups16, sys_getpgrp, sys_setgroups, compat_sys_setitimer, sys32_ftruncate64
.word sys_swapon, compat_sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
/*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid
.word sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
.word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
/*100*/ .word sys_getpriority, sys32_rt_sigreturn, compat_sys_rt_sigaction, compat_sys_rt_sigprocmask, compat_sys_rt_sigpending
.word compat_sys_rt_sigtimedwait, compat_sys_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid
/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
.word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, sys_nis_syscall, sys_getcwd
/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, compat_sys_recvmsg, compat_sys_sendmsg
.word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, compat_sys_getsockopt, sys_getcwd
/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
.word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
.word sys_nis_syscall, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
.word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown
.word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
/*140*/ .word sys_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
.word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
@@ -88,7 +88,8 @@ sys_call_table32:
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
.word compat_sys_setsockopt, sys_mlock2

#endif /* CONFIG_COMPAT */
@@ -168,4 +169,5 @@ sys_call_table:
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
.word sys_setsockopt, sys_mlock2
@@ -1,6 +1,7 @@
#include <linux/types.h>
#include <linux/tick.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/events.h>
@@ -206,6 +206,22 @@ void blk_delay_queue(struct request_queue *q, unsigned long msecs)
}
EXPORT_SYMBOL(blk_delay_queue);

/**
* blk_start_queue_async - asynchronously restart a previously stopped queue
* @q: The &struct request_queue in question
*
* Description:
* blk_start_queue_async() will clear the stop flag on the queue, and
* ensure that the request_fn for the queue is run from an async
* context.
**/
void blk_start_queue_async(struct request_queue *q)
{
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);

/**
* blk_start_queue - restart a previously stopped queue
* @q: The &struct request_queue in question
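[Editorial sketch, not part of the commit] The kernel-doc above introduces blk_start_queue_async(). The C sketch below shows, under stated assumptions, how a legacy (non-mq) block driver might call it from completion context, mirroring the null_blk change further down in this diff; the my_dev/my_complete_request names are illustrative only.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

struct my_dev {
	struct request_queue *q;	/* legacy request_fn queue */
};

/* Called from completion (possibly IRQ) context after freeing a tag. */
static void my_complete_request(struct my_dev *dev)
{
	struct request_queue *q = dev->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		/* clears QUEUE_FLAG_STOPPED and runs request_fn asynchronously */
		blk_start_queue_async(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}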
@@ -81,7 +81,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio *new = NULL;

bio_for_each_segment(bv, bio, iter) {
if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
if (sectors + (bv.bv_len >> 9) > blk_max_size_offset(q, bio->bi_iter.bi_sector))
goto split;

/*
@@ -47,7 +47,7 @@ struct skcipher_ctx {
bool merge;
bool enc;

struct ablkcipher_request req;
struct skcipher_request req;
};

struct skcipher_async_rsgl {

@@ -64,13 +64,13 @@ struct skcipher_async_req {
};

#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))

#define GET_REQ_SIZE(ctx) \
crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))

#define GET_IV_SIZE(ctx) \
crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))

#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
sizeof(struct scatterlist) - 1)

@@ -302,8 +302,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
unsigned ivsize = crypto_skcipher_ivsize(tfm);
struct skcipher_sg_list *sgl;
struct af_alg_control con = {};
long copied = 0;

@@ -507,7 +507,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
struct skcipher_async_req *sreq;
struct ablkcipher_request *req;
struct skcipher_request *req;
struct skcipher_async_rsgl *last_rsgl = NULL;
unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
unsigned int reqlen = sizeof(struct skcipher_async_req) +

@@ -531,9 +531,9 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
}
sg_init_table(sreq->tsg, tx_nents);
memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
skcipher_async_cb, sk);
skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
skcipher_async_cb, sk);

while (iov_iter_count(&msg->msg_iter)) {
struct skcipher_async_rsgl *rsgl;

@@ -608,10 +608,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
if (mark)
sg_mark_end(sreq->tsg + txbufs - 1);

ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
len, sreq->iv);
err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
crypto_ablkcipher_decrypt(req);
skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
len, sreq->iv);
err = ctx->enc ? crypto_skcipher_encrypt(req) :
crypto_skcipher_decrypt(req);
if (err == -EINPROGRESS) {
atomic_inc(&ctx->inflight);
err = -EIOCBQUEUED;

@@ -632,7 +632,7 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
&ctx->req));
struct skcipher_sg_list *sgl;
struct scatterlist *sg;

@@ -669,14 +669,13 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
if (!used)
goto free;

ablkcipher_request_set_crypt(&ctx->req, sg,
ctx->rsgl.sg, used,
ctx->iv);
skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
ctx->iv);

err = af_alg_wait_for_completion(
ctx->enc ?
crypto_ablkcipher_encrypt(&ctx->req) :
crypto_ablkcipher_decrypt(&ctx->req),
crypto_skcipher_encrypt(&ctx->req) :
crypto_skcipher_decrypt(&ctx->req),
&ctx->completion);

free:

@@ -751,17 +750,17 @@ static struct proto_ops algif_skcipher_ops = {

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
return crypto_alloc_ablkcipher(name, type, mask);
return crypto_alloc_skcipher(name, type, mask);
}

static void skcipher_release(void *private)
{
crypto_free_ablkcipher(private);
crypto_free_skcipher(private);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
return crypto_ablkcipher_setkey(private, key, keylen);
return crypto_skcipher_setkey(private, key, keylen);
}

static void skcipher_wait(struct sock *sk)

@@ -778,13 +777,13 @@ static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

if (atomic_read(&ctx->inflight))
skcipher_wait(sk);

skcipher_free_sgl(sk);
sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}

@@ -793,20 +792,20 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
{
struct skcipher_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);

ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;

ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}

memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
memset(ctx->iv, 0, crypto_skcipher_ivsize(private));

INIT_LIST_HEAD(&ctx->tsgl);
ctx->len = len;

@@ -819,9 +818,9 @@ static int skcipher_accept_parent(void *private, struct sock *sk)

ask->private = ctx;

ablkcipher_request_set_tfm(&ctx->req, private);
ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
skcipher_request_set_tfm(&ctx->req, private);
skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);

sk->sk_destruct = skcipher_sock_destruct;
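[Editorial sketch, not part of the commit] The algif_skcipher hunks above convert the code from the ablkcipher interface to the skcipher interface. The C sketch below shows the new calling pattern in isolation, using only calls that appear in the diff; the my_ctx/my_encrypt_one names and simplified error handling are assumptions for illustration.

#include <crypto/skcipher.h>
#include <crypto/if_alg.h>

struct my_ctx {
	void *iv;				/* crypto_skcipher_ivsize(tfm) bytes */
	struct af_alg_completion completion;
	struct skcipher_request req;		/* kept last; reqsize tail follows */
};

static int my_encrypt_one(struct crypto_skcipher *tfm, struct my_ctx *ctx,
			  struct scatterlist *src, struct scatterlist *dst,
			  unsigned int len)
{
	/* Bind the request to the transform and a completion callback. */
	skcipher_request_set_tfm(&ctx->req, tfm);
	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      af_alg_complete, &ctx->completion);

	/* Per-operation parameters: scatterlists, length and IV. */
	skcipher_request_set_crypt(&ctx->req, src, dst, len, ctx->iv);

	/* May complete asynchronously; algif waits via af_alg_wait_for_completion(). */
	return af_alg_wait_for_completion(crypto_skcipher_encrypt(&ctx->req),
					  &ctx->completion);
}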
@@ -232,20 +232,19 @@ static void end_cmd(struct nullb_cmd *cmd)
break;
case NULL_Q_BIO:
bio_endio(cmd->bio);
goto free_cmd;
break;
}

free_cmd(cmd);

/* Restart queue if needed, as we are freeing a tag */
if (q && !q->mq_ops && blk_queue_stopped(q)) {
if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
unsigned long flags;

spin_lock_irqsave(q->queue_lock, flags);
if (blk_queue_stopped(q))
blk_start_queue(q);
blk_start_queue_async(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
free_cmd:
free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -12123,18 +12123,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct intel_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_state *connector_state;
unsigned int used_ports = 0;
int i;

/*
* Walk the connector list instead of the encoder
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
for_each_connector_in_state(state, connector, connector_state, i) {
drm_for_each_connector(connector, dev) {
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;

connector_state = drm_atomic_get_existing_connector_state(state, connector);
if (!connector_state)
connector_state = connector->state;

if (!connector_state->best_encoder)
continue;
@@ -1381,7 +1381,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)

intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);

for (try = 0; !live_status && try < 4; try++) {
for (try = 0; !live_status && try < 9; try++) {
if (try)
msleep(10);
live_status = intel_digital_port_connected(dev_priv,
@@ -1265,15 +1265,17 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id)
return cma_protocol_roce_dev_port(device, port_num);
}

static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
const struct net_device *net_dev)
static bool cma_match_net_dev(const struct rdma_cm_id *id,
const struct net_device *net_dev,
u8 port_num)
{
const struct rdma_addr *addr = &id_priv->id.route.addr;
const struct rdma_addr *addr = &id->route.addr;

if (!net_dev)
/* This request is an AF_IB request or a RoCE request */
return addr->src_addr.ss_family == AF_IB ||
cma_protocol_roce(&id_priv->id);
return (!id->port_num || id->port_num == port_num) &&
(addr->src_addr.ss_family == AF_IB ||
cma_protocol_roce_dev_port(id->device, port_num));

return !addr->dev_addr.bound_dev_if ||
(net_eq(dev_net(net_dev), addr->dev_addr.net) &&

@@ -1295,13 +1297,13 @@ static struct rdma_id_private *cma_find_listener(
hlist_for_each_entry(id_priv, &bind_list->owners, node) {
if (cma_match_private_data(id_priv, ib_event->private_data)) {
if (id_priv->id.device == cm_id->device &&
cma_match_net_dev(id_priv, net_dev))
cma_match_net_dev(&id_priv->id, net_dev, req->port))
return id_priv;
list_for_each_entry(id_priv_dev,
&id_priv->listen_list,
listen_list) {
if (id_priv_dev->id.device == cm_id->device &&
cma_match_net_dev(id_priv_dev, net_dev))
cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
return id_priv_dev;
}
}
@@ -286,7 +286,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
ib_umem_release(msrq->umem);
} else {
kfree(msrq->wrid);
kvfree(msrq->wrid);
mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
&msrq->buf);
mlx4_db_free(dev->dev, &msrq->db);
@@ -232,6 +232,10 @@ struct phy_info {
u16 interface_type;
};

enum ocrdma_flags {
OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01
};

struct ocrdma_dev {
struct ib_device ibdev;
struct ocrdma_dev_attr attr;

@@ -287,6 +291,7 @@ struct ocrdma_dev {
atomic_t update_sl;
u16 pvid;
u32 asic_id;
u32 flags;

ulong last_stats_time;
struct mutex stats_lock; /* provide synch for debugfs operations */

@@ -591,4 +596,9 @@ static inline u8 ocrdma_is_enabled_and_synced(u32 state)
(state & OCRDMA_STATE_FLAG_SYNC);
}

static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
{
return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
}

#endif
@@ -579,6 +579,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,

cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
/* Request link events on this MQ. */
cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);

cmd->async_cqid_ringsize = cq->id;
cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<

@@ -819,20 +821,42 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
}
}

static void ocrdma_process_link_state(struct ocrdma_dev *dev,
struct ocrdma_ae_mcqe *cqe)
{
struct ocrdma_ae_lnkst_mcqe *evt;
u8 lstate;

evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);

if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
return;

if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
}

static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
/* async CQE processing */
struct ocrdma_ae_mcqe *cqe = ae_cqe;
u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;

if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
switch (evt_code) {
case OCRDMA_ASYNC_LINK_EVE_CODE:
ocrdma_process_link_state(dev, cqe);
break;
case OCRDMA_ASYNC_RDMA_EVE_CODE:
ocrdma_dispatch_ibevent(dev, cqe);
else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
break;
case OCRDMA_ASYNC_GRP5_EVE_CODE:
ocrdma_process_grp5_aync(dev, cqe);
else
break;
default:
pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
dev->id, evt_code);
}
}

static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)

@@ -1363,7 +1387,8 @@ static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
return status;
}

int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
u8 *lnk_state)
{
int status = -ENOMEM;
struct ocrdma_get_link_speed_rsp *rsp;

@@ -1384,8 +1409,11 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
goto mbx_err;

rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
>> OCRDMA_PHY_PS_SHIFT;
if (lnk_speed)
*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
>> OCRDMA_PHY_PS_SHIFT;
if (lnk_state)
*lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);

mbx_err:
kfree(cmd);

@@ -2515,9 +2543,10 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);

if (vlan_id < 0x1000) {
if (dev->pfc_state) {
vlan_id = 0;
if (vlan_id == 0xFFFF)
vlan_id = 0;
if (vlan_id || dev->pfc_state) {
if (!vlan_id) {
pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
dev->id);
pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
@@ -106,7 +106,8 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
bool solicited, u16 cqe_popped);

/* verbs specific mailbox commands */
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
u8 *lnk_st);
int ocrdma_query_config(struct ocrdma_dev *,
struct ocrdma_mbx_query_config *config);

@@ -153,5 +154,6 @@ char *port_speed_string(struct ocrdma_dev *dev);
void ocrdma_init_service_level(struct ocrdma_dev *);
void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
void ocrdma_free_pd_range(struct ocrdma_dev *dev);
void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate);

#endif /* __OCRDMA_HW_H__ */
@@ -290,6 +290,7 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
int status = 0, i;
u8 lstate = 0;
struct ocrdma_dev *dev;

dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));

@@ -319,6 +320,11 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status)
goto alloc_err;

/* Query Link state and update */
status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate);
if (!status)
ocrdma_update_link_state(dev, lstate);

for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
goto sysfs_err;

@@ -373,7 +379,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
ocrdma_remove_free(dev);
}

static int ocrdma_open(struct ocrdma_dev *dev)
static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev)
{
struct ib_event port_event;

@@ -384,32 +390,9 @@ static int ocrdma_open(struct ocrdma_dev *dev)
return 0;
}

static int ocrdma_close(struct ocrdma_dev *dev)
static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev)
{
int i;
struct ocrdma_qp *qp, **cur_qp;
struct ib_event err_event;
struct ib_qp_attr attrs;
int attr_mask = IB_QP_STATE;

attrs.qp_state = IB_QPS_ERR;
mutex_lock(&dev->dev_lock);
if (dev->qp_tbl) {
cur_qp = dev->qp_tbl;
for (i = 0; i < OCRDMA_MAX_QP; i++) {
qp = cur_qp[i];
if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
/* change the QP state to ERROR */
_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);

err_event.event = IB_EVENT_QP_FATAL;
err_event.element.qp = &qp->ibqp;
err_event.device = &dev->ibdev;
ib_dispatch_event(&err_event);
}
}
}
mutex_unlock(&dev->dev_lock);

err_event.event = IB_EVENT_PORT_ERR;
err_event.element.port_num = 1;

@@ -420,7 +403,7 @@ static int ocrdma_close(struct ocrdma_dev *dev)

static void ocrdma_shutdown(struct ocrdma_dev *dev)
{
ocrdma_close(dev);
ocrdma_dispatch_port_error(dev);
ocrdma_remove(dev);
}

@@ -431,18 +414,28 @@ static void ocrdma_shutdown(struct ocrdma_dev *dev)
static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
{
switch (event) {
case BE_DEV_UP:
ocrdma_open(dev);
break;
case BE_DEV_DOWN:
ocrdma_close(dev);
break;
case BE_DEV_SHUTDOWN:
ocrdma_shutdown(dev);
break;
default:
break;
}
}

void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate)
{
if (!(dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)) {
dev->flags |= OCRDMA_FLAGS_LINK_STATUS_INIT;
if (!lstate)
return;
}

if (!lstate)
ocrdma_dispatch_port_error(dev);
else
ocrdma_dispatch_port_active(dev);
}

static struct ocrdma_driver ocrdma_drv = {
.name = "ocrdma_driver",
.add = ocrdma_add,
@@ -465,8 +465,11 @@ struct ocrdma_ae_qp_mcqe {
u32 valid_ae_event;
};

#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
enum ocrdma_async_event_code {
OCRDMA_ASYNC_LINK_EVE_CODE = 0x01,
OCRDMA_ASYNC_GRP5_EVE_CODE = 0x05,
OCRDMA_ASYNC_RDMA_EVE_CODE = 0x14
};

enum ocrdma_async_grp5_events {
OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,

@@ -489,6 +492,44 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_MAX_ASYNC_ERRORS
};

struct ocrdma_ae_lnkst_mcqe {
u32 speed_state_ptn;
u32 qos_reason_falut;
u32 evt_tag;
u32 valid_ae_event;
};

enum {
OCRDMA_AE_LSC_PORT_NUM_MASK = 0x3F,
OCRDMA_AE_LSC_PT_SHIFT = 0x06,
OCRDMA_AE_LSC_PT_MASK = (0x03 <<
OCRDMA_AE_LSC_PT_SHIFT),
OCRDMA_AE_LSC_LS_SHIFT = 0x08,
OCRDMA_AE_LSC_LS_MASK = (0xFF <<
OCRDMA_AE_LSC_LS_SHIFT),
OCRDMA_AE_LSC_LD_SHIFT = 0x10,
OCRDMA_AE_LSC_LD_MASK = (0xFF <<
OCRDMA_AE_LSC_LD_SHIFT),
OCRDMA_AE_LSC_PPS_SHIFT = 0x18,
OCRDMA_AE_LSC_PPS_MASK = (0xFF <<
OCRDMA_AE_LSC_PPS_SHIFT),
OCRDMA_AE_LSC_PPF_MASK = 0xFF,
OCRDMA_AE_LSC_ER_SHIFT = 0x08,
OCRDMA_AE_LSC_ER_MASK = (0xFF <<
OCRDMA_AE_LSC_ER_SHIFT),
OCRDMA_AE_LSC_QOS_SHIFT = 0x10,
OCRDMA_AE_LSC_QOS_MASK = (0xFFFF <<
OCRDMA_AE_LSC_QOS_SHIFT)
};

enum {
OCRDMA_AE_LSC_PLINK_DOWN = 0x00,
OCRDMA_AE_LSC_PLINK_UP = 0x01,
OCRDMA_AE_LSC_LLINK_DOWN = 0x02,
OCRDMA_AE_LSC_LLINK_MASK = 0x02,
OCRDMA_AE_LSC_LLINK_UP = 0x03
};

/* mailbox command request and responses */
enum {
OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2,

@@ -676,7 +717,7 @@ enum {
OCRDMA_PHY_PFLT_SHIFT = 0x18,
OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000,
OCRDMA_QOS_LNKSP_SHIFT = 0x10,
OCRDMA_LLST_MASK = 0xFF,
OCRDMA_LINK_ST_MASK = 0x01,
OCRDMA_PLFC_MASK = 0x00000400,
OCRDMA_PLFC_SHIFT = 0x8,
OCRDMA_PLRFC_MASK = 0x00000200,

@@ -691,7 +732,7 @@ struct ocrdma_get_link_speed_rsp {

u32 pflt_pps_ld_pnum;
u32 qos_lsp;
u32 res_lls;
u32 res_lnk_st;
};

enum {
@@ -171,7 +171,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
int status;
u8 speed;

status = ocrdma_mbx_get_link_speed(dev, &speed);
status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
if (status)
speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
@@ -75,7 +75,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
struct nvm_block *blk;
int i;

lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun];
lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

for (i = 0; i < nr_blocks; i++) {
if (blks[i] == 0)
@@ -3430,25 +3430,29 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4

/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
#define BNX2X_NUM_TSO_WIN_SUB_BDS 3

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* check if packet requires linearization (packet is too fragmented)
no need to check fragmentation if page size > 8K (there will be no
violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
u32 xmit_type)
{
int to_copy = 0;
int hlen = 0;
int first_bd_sz = 0;
int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
int to_copy = 0, hlen = 0;

/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
if (xmit_type & XMIT_GSO_ENC)
num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;

if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
if (xmit_type & XMIT_GSO) {
unsigned short lso_mss = skb_shinfo(skb)->gso_size;
/* Check if LSO packet needs to be copied:
3 = 1 (for headers BD) + 2 (for PBD and last BD) */
int wnd_size = MAX_FETCH_BD - 3;
int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
/* Number of windows to check */
int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
int wnd_idx = 0;
@@ -848,8 +848,6 @@ void be_roce_dev_remove(struct be_adapter *);
/*
* internal function to open-close roce device during ifup-ifdown.
*/
void be_roce_dev_open(struct be_adapter *);
void be_roce_dev_close(struct be_adapter *);
void be_roce_dev_shutdown(struct be_adapter *);

#endif /* BE_H */
@@ -3299,8 +3299,10 @@ static int be_msix_register(struct be_adapter *adapter)

return 0;
err_msix:
for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
for (i--; i >= 0; i--) {
eqo = &adapter->eq_obj[i];
free_irq(be_msix_vec_get(adapter, eqo), eqo);
}
dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
status);
be_msix_disable(adapter);

@@ -3432,8 +3434,6 @@ static int be_close(struct net_device *netdev)

be_disable_if_filters(adapter);

be_roce_dev_close(adapter);

if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
for_all_evt_queues(adapter, eqo, i) {
napi_disable(&eqo->napi);

@@ -3601,8 +3601,6 @@ static int be_open(struct net_device *netdev)
be_link_status_update(adapter, link_status);

netif_tx_start_all_queues(netdev);
be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
if (skyhawk_chip(adapter))
vxlan_get_rx_port(netdev);
@@ -116,40 +116,6 @@ void be_roce_dev_remove(struct be_adapter *adapter)
}
}

static void _be_roce_dev_open(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
BE_DEV_UP);
}

void be_roce_dev_open(struct be_adapter *adapter)
{
if (be_roce_supported(adapter)) {
mutex_lock(&be_adapter_list_lock);
_be_roce_dev_open(adapter);
mutex_unlock(&be_adapter_list_lock);
}
}

static void _be_roce_dev_close(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
BE_DEV_DOWN);
}

void be_roce_dev_close(struct be_adapter *adapter)
{
if (be_roce_supported(adapter)) {
mutex_lock(&be_adapter_list_lock);
_be_roce_dev_close(adapter);
mutex_unlock(&be_adapter_list_lock);
}
}

void be_roce_dev_shutdown(struct be_adapter *adapter)
{
if (be_roce_supported(adapter)) {

@@ -177,8 +143,6 @@ int be_roce_register_driver(struct ocrdma_driver *drv)

_be_roce_dev_add(dev);
netdev = dev->netdev;
if (netif_running(netdev) && netif_oper_up(netdev))
_be_roce_dev_open(dev);
}
mutex_unlock(&be_adapter_list_lock);
return 0;
@@ -60,9 +60,7 @@ struct ocrdma_driver {
void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
};

enum {
BE_DEV_UP = 0,
BE_DEV_DOWN = 1,
enum be_roce_event {
BE_DEV_SHUTDOWN = 2
};
@@ -242,6 +242,13 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
unsigned long flags;
u64 ns, zero = 0;

/* mlx4_en_init_timestamp is called for each netdev.
* mdev->ptp_clock is common for all ports, skip initialization if
* was done for other port.
*/
if (mdev->ptp_clock)
return;

rwlock_init(&mdev->clock_lock);

memset(&mdev->cycles, 0, sizeof(mdev->cycles));
@@ -232,9 +232,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);

if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_remove_timestamp(mdev);

flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);

@@ -320,10 +317,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
mdev->port_cnt++;

/* Initialize time stamp mechanism */
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_init_timestamp(mdev);

/* Set default number of RX rings*/
mlx4_en_set_num_rx_rings(mdev);
@@ -2072,6 +2072,9 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
/* flush any pending task for this netdev */
flush_workqueue(mdev->workqueue);

if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_remove_timestamp(mdev);

/* Detach the netdev so tasks would not attempt to access it */
mutex_lock(&mdev->state_lock);
mdev->pndev[priv->port] = NULL;

@@ -3058,9 +3061,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

/* Initialize time stamp mechanism */
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
queue_delayed_work(mdev->workqueue, &priv->service_task,
SERVICE_TASK_DELAY);
mlx4_en_init_timestamp(mdev);

queue_delayed_work(mdev->workqueue, &priv->service_task,
SERVICE_TASK_DELAY);

mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
mdev->profile.prof[priv->port].rx_ppp,
@@ -1937,6 +1937,12 @@ static void refill_rx(struct net_device *dev)
break; /* Better luck next round. */
np->rx_dma[entry] = pci_map_single(np->pci_dev,
skb->data, buflen, PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(np->pci_dev,
np->rx_dma[entry])) {
dev_kfree_skb_any(skb);
np->rx_skbuff[entry] = NULL;
break; /* Better luck next round. */
}
np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
}
np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);

@@ -2093,6 +2099,12 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
np->tx_skbuff[entry] = skb;
np->tx_dma[entry] = pci_map_single(np->pci_dev,
skb->data,skb->len, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) {
np->tx_skbuff[entry] = NULL;
dev_kfree_skb_irq(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}

np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
@@ -252,7 +252,7 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
}

if (!idc->vnic_wait_limit) {
if (state != QLCNIC_DEV_NPAR_OPER) {
dev_err(&adapter->pdev->dev,
"vNIC mode not operational, state check timed out.\n");
return -EIO;
@@ -1167,6 +1167,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
u32 buf_len;

mdp->cur_rx = 0;
mdp->cur_tx = 0;

@@ -1187,9 +1188,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
/* The size of the buffer is a multiple of 32 bytes. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
dma_addr = dma_map_single(&ndev->dev, skb->data,
rxdesc->buffer_length,
buf_len = ALIGN(mdp->rx_buf_sz, 32);
rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr)) {
kfree_skb(skb);

@@ -1220,7 +1221,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->tx_skbuff[i] = NULL;
txdesc = &mdp->tx_ring[i];
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
txdesc->buffer_length = 0;
txdesc->len = cpu_to_edmac(mdp, 0);
if (i == 0) {
/* Tx descriptor address set */
sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);

@@ -1429,7 +1430,8 @@ static int sh_eth_txfree(struct net_device *ndev)
if (mdp->tx_skbuff[entry]) {
dma_unmap_single(&ndev->dev,
edmac_to_cpu(mdp, txdesc->addr),
txdesc->buffer_length, DMA_TO_DEVICE);
edmac_to_cpu(mdp, txdesc->len) >> 16,
DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
free_num++;

@@ -1439,7 +1441,7 @@ static int sh_eth_txfree(struct net_device *ndev)
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

ndev->stats.tx_packets++;
ndev->stats.tx_bytes += txdesc->buffer_length;
ndev->stats.tx_bytes += edmac_to_cpu(mdp, txdesc->len) >> 16;
}
return free_num;
}

@@ -1458,6 +1460,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
u32 desc_status;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
u32 buf_len;

boguscnt = min(boguscnt, *quota);
limit = boguscnt;

@@ -1466,7 +1469,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* RACT bit must be checked before all the following reads */
dma_rmb();
desc_status = edmac_to_cpu(mdp, rxdesc->status);
pkt_len = rxdesc->frame_length;
pkt_len = edmac_to_cpu(mdp, rxdesc->len) & RD_RFL;

if (--boguscnt < 0)
break;

@@ -1532,7 +1535,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
entry = mdp->dirty_rx % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
/* The size of the buffer is 32 byte boundary. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
buf_len = ALIGN(mdp->rx_buf_sz, 32);
rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);

if (mdp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(ndev, skbuff_size);

@@ -1540,8 +1544,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
break; /* Better luck next round. */
sh_eth_set_receive_align(skb);
dma_addr = dma_map_single(&ndev->dev, skb->data,
rxdesc->buffer_length,
DMA_FROM_DEVICE);
buf_len, DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr)) {
kfree_skb(skb);
break;

@@ -2407,7 +2410,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
txdesc->addr = cpu_to_edmac(mdp, dma_addr);
txdesc->buffer_length = skb->len;
txdesc->len = cpu_to_edmac(mdp, skb->len << 16);

dma_wmb(); /* TACT bit must be set after all the above writes */
if (entry >= mdp->num_tx_ring - 1)
@@ -283,7 +283,7 @@ enum DMAC_IM_BIT {
DMAC_M_RINT1 = 0x00000001,
};

/* Receive descriptor bit */
/* Receive descriptor 0 bits */
enum RD_STS_BIT {
RD_RACT = 0x80000000, RD_RDLE = 0x40000000,
RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,

@@ -298,6 +298,12 @@ enum RD_STS_BIT {
#define RDFEND RD_RFP0
#define RD_RFP (RD_RFP1|RD_RFP0)

/* Receive descriptor 1 bits */
enum RD_LEN_BIT {
RD_RFL = 0x0000ffff, /* receive frame length */
RD_RBL = 0xffff0000, /* receive buffer length */
};

/* FCFTR */
enum FCFTR_BIT {
FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,

@@ -307,7 +313,7 @@ enum FCFTR_BIT {
#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)

/* Transmit descriptor bit */
/* Transmit descriptor 0 bits */
enum TD_STS_BIT {
TD_TACT = 0x80000000, TD_TDLE = 0x40000000,
TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000,

@@ -317,6 +323,11 @@ enum TD_STS_BIT {
#define TDFEND TD_TFP0
#define TD_TFP (TD_TFP1|TD_TFP0)

/* Transmit descriptor 1 bits */
enum TD_LEN_BIT {
TD_TBL = 0xffff0000, /* transmit buffer length */
};

/* RMCR */
enum RMCR_BIT {
RMCR_RNC = 0x00000001,

@@ -425,15 +436,9 @@ enum TSU_FWSLC_BIT {
*/
struct sh_eth_txdesc {
u32 status; /* TD0 */
#if defined(__LITTLE_ENDIAN)
u16 pad0; /* TD1 */
u16 buffer_length; /* TD1 */
#else
u16 buffer_length; /* TD1 */
u16 pad0; /* TD1 */
#endif
u32 len; /* TD1 */
u32 addr; /* TD2 */
u32 pad1; /* padding data */
u32 pad0; /* padding data */
} __aligned(2) __packed;

/* The sh ether Rx buffer descriptors.

@@ -441,13 +446,7 @@ struct sh_eth_txdesc {
*/
struct sh_eth_rxdesc {
u32 status; /* RD0 */
#if defined(__LITTLE_ENDIAN)
u16 frame_length; /* RD1 */
u16 buffer_length; /* RD1 */
#else
u16 buffer_length; /* RD1 */
u16 frame_length; /* RD1 */
#endif
u32 len; /* RD1 */
u32 addr; /* RD2 */
u32 pad0; /* padding data */
} __aligned(2) __packed;
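[Editorial sketch, not part of the commit] The sh_eth hunks above replace the endian-dependent 16-bit buffer_length/frame_length descriptor fields with a single 32-bit len word; per the new RD_LEN_BIT/TD_LEN_BIT enums, the buffer length lives in the upper 16 bits and the received frame length in the lower 16. The helpers below restate that pack/unpack arithmetic; the my_* names are illustrative, while cpu_to_edmac()/edmac_to_cpu(), RD_RFL and struct sh_eth_rxdesc come from the driver.

/* RX: program the buffer length into the upper 16 bits (RD_RBL). */
static inline u32 my_rx_len_word(struct sh_eth_private *mdp, u32 buf_len)
{
	return cpu_to_edmac(mdp, buf_len << 16);
}

/* RX: the hardware reports the frame length in the lower 16 bits (RD_RFL). */
static inline u32 my_rx_frame_len(struct sh_eth_private *mdp,
				  struct sh_eth_rxdesc *rxdesc)
{
	return edmac_to_cpu(mdp, rxdesc->len) & RD_RFL;
}

/* TX: the buffer length occupies the upper 16 bits (TD_TBL). */
static inline u32 my_tx_len_word(struct sh_eth_private *mdp, u32 skb_len)
{
	return cpu_to_edmac(mdp, skb_len << 16);
}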
@@ -2026,45 +2026,54 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
for_each_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
const void *mac_addr = NULL;
u32 phyid;
int lenp;
const __be32 *parp;
struct device_node *mdio_node;
struct platform_device *mdio;

/* This is no slave child node, continue */
if (strcmp(slave_node->name, "slave"))
continue;

priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
parp = of_get_property(slave_node, "phy_id", &lenp);
if (of_phy_is_fixed_link(slave_node)) {
struct phy_device *pd;
struct device_node *phy_node;
struct phy_device *phy_dev;

/* In the case of a fixed PHY, the DT node associated
* to the PHY is the Ethernet MAC DT node.
*/
ret = of_phy_register_fixed_link(slave_node);
if (ret)
return ret;
pd = of_phy_find_device(slave_node);
if (!pd)
phy_node = of_node_get(slave_node);
phy_dev = of_phy_find_device(phy_node);
if (!phy_dev)
return -ENODEV;
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, pd->bus->id, pd->phy_id);
PHY_ID_FMT, phy_dev->bus->id, phy_dev->addr);
} else if (parp) {
u32 phyid;
struct device_node *mdio_node;
struct platform_device *mdio;

if (lenp != (sizeof(__be32) * 2)) {
dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
goto no_phy_slave;
}
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
mdio = of_find_device_by_node(mdio_node);
of_node_put(mdio_node);
if (!mdio) {
dev_err(&pdev->dev, "Missing mdio platform device\n");
return -EINVAL;
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
} else {
dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
goto no_phy_slave;
}
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
goto no_phy_slave;
}
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
mdio = of_find_device_by_node(mdio_node);
of_node_put(mdio_node);
if (!mdio) {
dev_err(&pdev->dev, "Missing mdio platform device\n");
return -EINVAL;
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
slave_data->phy_if = of_get_phy_mode(slave_node);
if (slave_data->phy_if < 0) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",

@@ -2418,7 +2427,7 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->irq = platform_get_irq(pdev, 1);
if (ndev->irq < 0) {
dev_err(priv->dev, "error getting irq resource\n");
ret = -ENOENT;
ret = ndev->irq;
goto clean_ale_ret;
}

@@ -2439,8 +2448,10 @@ static int cpsw_probe(struct platform_device *pdev)

/* RX IRQ */
irq = platform_get_irq(pdev, 1);
if (irq < 0)
if (irq < 0) {
ret = irq;
goto clean_ale_ret;
}

priv->irqs_table[0] = irq;
ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,

@@ -2452,8 +2463,10 @@ static int cpsw_probe(struct platform_device *pdev)

/* TX IRQ */
irq = platform_get_irq(pdev, 2);
if (irq < 0)
if (irq < 0) {
ret = irq;
goto clean_ale_ret;
}

priv->irqs_table[1] = irq;
ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
@@ -1155,7 +1155,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *t, *geneve = netdev_priv(dev);
bool tun_collect_md, tun_on_same_port;
int err;
int err, encap_len;

if (!remote)
return -EINVAL;

@@ -1187,6 +1187,14 @@ static int geneve_configure(struct net *net, struct net_device *dev,
if (t)
return -EBUSY;

/* make enough headroom for basic scenario */
encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
if (remote->sa.sa_family == AF_INET)
encap_len += sizeof(struct iphdr);
else
encap_len += sizeof(struct ipv6hdr);
dev->needed_headroom = encap_len + ETH_HLEN;

if (metadata) {
if (tun_on_same_port)
return -EPERM;
@@ -683,14 +683,14 @@ static void sixpack_close(struct tty_struct *tty)
if (!atomic_dec_and_test(&sp->refcnt))
down(&sp->dead_sem);

unregister_netdev(sp->dev);

del_timer(&sp->tx_t);
del_timer(&sp->resync_t);
del_timer_sync(&sp->tx_t);
del_timer_sync(&sp->resync_t);

/* Free all 6pack frame buffers. */
kfree(sp->rbuff);
kfree(sp->xbuff);

unregister_netdev(sp->dev);
}

/* Perform I/O control on an active 6pack channel. */

@@ -798,13 +798,13 @@ static void mkiss_close(struct tty_struct *tty)
if (!atomic_dec_and_test(&ax->refcnt))
down(&ax->dead_sem);

unregister_netdev(ax->dev);

/* Free all AX25 frame buffers. */
kfree(ax->rbuff);
kfree(ax->xbuff);

ax->tty = NULL;

unregister_netdev(ax->dev);
}

/* Perform I/O control on an active ax25 channel. */

@@ -100,7 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
.ndo_change_mtu = cdc_ncm_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid,

@@ -41,6 +41,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
@@ -689,6 +690,33 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
kfree(ctx);
}

/* we need to override the usbnet change_mtu ndo for two reasons:
 * - respect the negotiated maximum datagram size
 * - avoid unwanted changes to rx and tx buffers
 */
int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
{
struct usbnet *dev = netdev_priv(net);
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev);

if (new_mtu <= 0 || new_mtu > maxmtu)
return -EINVAL;
net->mtu = new_mtu;
return 0;
}
EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);

static const struct net_device_ops cdc_ncm_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = cdc_ncm_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};

int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
{
struct cdc_ncm_ctx *ctx;
@@ -823,6 +851,9 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
/* add our sysfs attrs */
dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;

/* must handle MTU changes */
dev->net->netdev_ops = &cdc_ncm_netdev_ops;

return 0;

error2:
@@ -1558,6 +1589,24 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long) &wwan_info,
},

/* DW5812 LTE Verizon Mobile Broadband Card
 * Unlike DW5550 this device requires FLAG_NOARP
 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bb,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_noarp_info,
},

/* DW5813 LTE AT&T Mobile Broadband Card
 * Unlike DW5550 this device requires FLAG_NOARP
 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bc,
USB_CLASS_COMM,
USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&wwan_noarp_info,
},

/* Dell branded MBM devices like DW5550 */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,

@@ -117,12 +117,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
kfree_skb(skb);
goto drop;
}
/* don't change ip_summed == CHECKSUM_PARTIAL, as that
 * will cause bad checksum on forwarded packets
 */
if (skb->ip_summed == CHECKSUM_NONE &&
rcv->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;

if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);

@@ -69,13 +69,19 @@
#include "iwl-agn-hw.h"

/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 19
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 19
#define IWL7265D_UCODE_API_MAX 19

/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 13
#define IWL7265_UCODE_API_OK 13
#define IWL7265D_UCODE_API_OK 13

/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 13
#define IWL7265_UCODE_API_MIN 13
#define IWL7265D_UCODE_API_MIN 13

/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@@ -149,10 +155,7 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};

#define IWL_DEVICE_7000 \
.ucode_api_max = IWL7260_UCODE_API_MAX, \
.ucode_api_ok = IWL7260_UCODE_API_OK, \
.ucode_api_min = IWL7260_UCODE_API_MIN, \
#define IWL_DEVICE_7000_COMMON \
.device_family = IWL_DEVICE_FAMILY_7000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
@@ -163,6 +166,24 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.dccm_offset = IWL7000_DCCM_OFFSET

#define IWL_DEVICE_7000 \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL7260_UCODE_API_MAX, \
.ucode_api_ok = IWL7260_UCODE_API_OK, \
.ucode_api_min = IWL7260_UCODE_API_MIN

#define IWL_DEVICE_7005 \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL7265_UCODE_API_MAX, \
.ucode_api_ok = IWL7265_UCODE_API_OK, \
.ucode_api_min = IWL7265_UCODE_API_MIN

#define IWL_DEVICE_7005D \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL7265D_UCODE_API_MAX, \
.ucode_api_ok = IWL7265D_UCODE_API_OK, \
.ucode_api_min = IWL7265D_UCODE_API_MIN

const struct iwl_cfg iwl7260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7260",
.fw_name_pre = IWL7260_FW_PRE,
@@ -266,7 +287,7 @@ static const struct iwl_ht_params iwl7265_ht_params = {
const struct iwl_cfg iwl3165_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 3165",
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
IWL_DEVICE_7005D,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3165_NVM_VERSION,
.nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@@ -277,7 +298,7 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
const struct iwl_cfg iwl7265_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7265",
.fw_name_pre = IWL7265_FW_PRE,
IWL_DEVICE_7000,
IWL_DEVICE_7005,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -288,7 +309,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
const struct iwl_cfg iwl7265_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 7265",
.fw_name_pre = IWL7265_FW_PRE,
IWL_DEVICE_7000,
IWL_DEVICE_7005,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -299,7 +320,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
const struct iwl_cfg iwl7265_n_cfg = {
.name = "Intel(R) Wireless N 7265",
.fw_name_pre = IWL7265_FW_PRE,
IWL_DEVICE_7000,
IWL_DEVICE_7005,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -310,7 +331,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
const struct iwl_cfg iwl7265d_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7265",
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
IWL_DEVICE_7005D,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -321,7 +342,7 @@ const struct iwl_cfg iwl7265d_2ac_cfg = {
const struct iwl_cfg iwl7265d_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 7265",
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
IWL_DEVICE_7005D,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -332,7 +353,7 @@ const struct iwl_cfg iwl7265d_2n_cfg = {
const struct iwl_cfg iwl7265d_n_cfg = {
.name = "Intel(R) Wireless N 7265",
.fw_name_pre = IWL7265D_FW_PRE,
IWL_DEVICE_7000,
IWL_DEVICE_7005D,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -342,5 +363,5 @@ const struct iwl_cfg iwl7265d_n_cfg = {

MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));

@@ -1222,8 +1222,8 @@ static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm,
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
u8 sta_id = mvmvif->ap_sta_id;

sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
/*
 * It is possible that the 'sta' parameter is NULL,
 * for example when a GTK is removed - the sta_id will then
@@ -1590,14 +1590,15 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
u16 *phase1key)
{
struct iwl_mvm_sta *mvm_sta;
u8 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
u8 sta_id;
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
return;

rcu_read_lock();

sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
goto unlock;

if (!sta) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (WARN_ON(IS_ERR_OR_NULL(sta))) {
@@ -1609,6 +1610,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

unlock:
rcu_read_unlock();
}

@@ -61,7 +61,9 @@ static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
*val = *(u8 __force *) walker;
else if (size == 2)
*val = *(u16 __force *) walker;
else if (size != 4)
else if (size == 4)
*val = reg_val;
else
return PCIBIOS_BAD_REGISTER_NUMBER;

return PCIBIOS_SUCCESSFUL;

@@ -58,6 +58,8 @@
#include <linux/atalk.h>
#include <linux/gfp.h>

#include "internal.h"

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/rfcomm.h>
@@ -115,19 +117,38 @@
#include <asm/fbio.h>
#endif

static int w_long(unsigned int fd, unsigned int cmd,
compat_ulong_t __user *argp)
{
mm_segment_t old_fs = get_fs();
int err;
unsigned long val;
#define convert_in_user(srcptr, dstptr) \
({ \
typeof(*srcptr) val; \
\
get_user(val, srcptr) || put_user(val, dstptr); \
})

set_fs (KERNEL_DS);
err = sys_ioctl(fd, cmd, (unsigned long)&val);
set_fs (old_fs);
if (!err && put_user(val, argp))
static int do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int err;

err = security_file_ioctl(file, cmd, arg);
if (err)
return err;

return vfs_ioctl(file, cmd, arg);
}

static int w_long(struct file *file,
unsigned int cmd, compat_ulong_t __user *argp)
{
int err;
unsigned long __user *valp = compat_alloc_user_space(sizeof(*valp));

if (valp == NULL)
return -EFAULT;
return err;
err = do_ioctl(file, cmd, (unsigned long)valp);
if (err)
return err;
if (convert_in_user(valp, argp))
return -EFAULT;
return 0;
}

struct compat_video_event {
@@ -139,23 +160,23 @@ struct compat_video_event {
} u;
};

static int do_video_get_event(unsigned int fd, unsigned int cmd,
struct compat_video_event __user *up)
static int do_video_get_event(struct file *file,
unsigned int cmd, struct compat_video_event __user *up)
{
struct video_event kevent;
mm_segment_t old_fs = get_fs();
struct video_event __user *kevent =
compat_alloc_user_space(sizeof(*kevent));
int err;

set_fs(KERNEL_DS);
err = sys_ioctl(fd, cmd, (unsigned long) &kevent);
set_fs(old_fs);
if (kevent == NULL)
return -EFAULT;

err = do_ioctl(file, cmd, (unsigned long)kevent);
if (!err) {
err = put_user(kevent.type, &up->type);
err |= put_user(kevent.timestamp, &up->timestamp);
err |= put_user(kevent.u.size.w, &up->u.size.w);
err |= put_user(kevent.u.size.h, &up->u.size.h);
err |= put_user(kevent.u.size.aspect_ratio,
err = convert_in_user(&kevent->type, &up->type);
err |= convert_in_user(&kevent->timestamp, &up->timestamp);
err |= convert_in_user(&kevent->u.size.w, &up->u.size.w);
err |= convert_in_user(&kevent->u.size.h, &up->u.size.h);
err |= convert_in_user(&kevent->u.size.aspect_ratio,
&up->u.size.aspect_ratio);
if (err)
err = -EFAULT;
@@ -169,8 +190,8 @@ struct compat_video_still_picture {
int32_t size;
};

static int do_video_stillpicture(unsigned int fd, unsigned int cmd,
struct compat_video_still_picture __user *up)
static int do_video_stillpicture(struct file *file,
unsigned int cmd, struct compat_video_still_picture __user *up)
{
struct video_still_picture __user *up_native;
compat_uptr_t fp;
@@ -190,7 +211,7 @@ static int do_video_stillpicture(unsigned int fd, unsigned int cmd,
if (err)
return -EFAULT;

err = sys_ioctl(fd, cmd, (unsigned long) up_native);
err = do_ioctl(file, cmd, (unsigned long) up_native);

return err;
}
@@ -200,8 +221,8 @@ struct compat_video_spu_palette {
compat_uptr_t palette;
};

static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
struct compat_video_spu_palette __user *up)
static int do_video_set_spu_palette(struct file *file,
unsigned int cmd, struct compat_video_spu_palette __user *up)
{
struct video_spu_palette __user *up_native;
compat_uptr_t palp;
@@ -218,7 +239,7 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
if (err)
return -EFAULT;

err = sys_ioctl(fd, cmd, (unsigned long) up_native);
err = do_ioctl(file, cmd, (unsigned long) up_native);

return err;
}
@@ -276,7 +297,7 @@ static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iov
return 0;
}

static int sg_ioctl_trans(unsigned int fd, unsigned int cmd,
static int sg_ioctl_trans(struct file *file, unsigned int cmd,
sg_io_hdr32_t __user *sgio32)
{
sg_io_hdr_t __user *sgio;
@@ -289,7 +310,7 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd,
if (get_user(interface_id, &sgio32->interface_id))
return -EFAULT;
if (interface_id != 'S')
return sys_ioctl(fd, cmd, (unsigned long)sgio32);
return do_ioctl(file, cmd, (unsigned long)sgio32);

if (get_user(iovec_count, &sgio32->iovec_count))
return -EFAULT;
@@ -349,7 +370,7 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd,
if (put_user(compat_ptr(data), &sgio->usr_ptr))
return -EFAULT;

err = sys_ioctl(fd, cmd, (unsigned long) sgio);
err = do_ioctl(file, cmd, (unsigned long) sgio);

if (err >= 0) {
void __user *datap;
@@ -380,13 +401,13 @@ struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
int unused;
};

static int sg_grt_trans(unsigned int fd, unsigned int cmd, struct
compat_sg_req_info __user *o)
static int sg_grt_trans(struct file *file,
unsigned int cmd, struct compat_sg_req_info __user *o)
{
int err, i;
sg_req_info_t __user *r;
r = compat_alloc_user_space(sizeof(sg_req_info_t)*SG_MAX_QUEUE);
err = sys_ioctl(fd,cmd,(unsigned long)r);
err = do_ioctl(file, cmd, (unsigned long)r);
if (err < 0)
return err;
for (i = 0; i < SG_MAX_QUEUE; i++) {
@@ -412,8 +433,8 @@ struct sock_fprog32 {
#define PPPIOCSPASS32 _IOW('t', 71, struct sock_fprog32)
#define PPPIOCSACTIVE32 _IOW('t', 70, struct sock_fprog32)

static int ppp_sock_fprog_ioctl_trans(unsigned int fd, unsigned int cmd,
struct sock_fprog32 __user *u_fprog32)
static int ppp_sock_fprog_ioctl_trans(struct file *file,
unsigned int cmd, struct sock_fprog32 __user *u_fprog32)
{
struct sock_fprog __user *u_fprog64 = compat_alloc_user_space(sizeof(struct sock_fprog));
void __user *fptr64;
@@ -435,7 +456,7 @@ static int ppp_sock_fprog_ioctl_trans(unsigned int fd, unsigned int cmd,
else
cmd = PPPIOCSACTIVE;

return sys_ioctl(fd, cmd, (unsigned long) u_fprog64);
return do_ioctl(file, cmd, (unsigned long) u_fprog64);
}

struct ppp_option_data32 {
@@ -451,7 +472,7 @@ struct ppp_idle32 {
};
#define PPPIOCGIDLE32 _IOR('t', 63, struct ppp_idle32)

static int ppp_gidle(unsigned int fd, unsigned int cmd,
static int ppp_gidle(struct file *file, unsigned int cmd,
struct ppp_idle32 __user *idle32)
{
struct ppp_idle __user *idle;
@@ -460,7 +481,7 @@ static int ppp_gidle(unsigned int fd, unsigned int cmd,

idle = compat_alloc_user_space(sizeof(*idle));

err = sys_ioctl(fd, PPPIOCGIDLE, (unsigned long) idle);
err = do_ioctl(file, PPPIOCGIDLE, (unsigned long) idle);

if (!err) {
if (get_user(xmit, &idle->xmit_idle) ||
@@ -472,7 +493,7 @@ static int ppp_gidle(unsigned int fd, unsigned int cmd,
return err;
}

static int ppp_scompress(unsigned int fd, unsigned int cmd,
static int ppp_scompress(struct file *file, unsigned int cmd,
struct ppp_option_data32 __user *odata32)
{
struct ppp_option_data __user *odata;
@@ -492,7 +513,7 @@ static int ppp_scompress(unsigned int fd, unsigned int cmd,
sizeof(__u32) + sizeof(int)))
return -EFAULT;

return sys_ioctl(fd, PPPIOCSCOMPRESS, (unsigned long) odata);
return do_ioctl(file, PPPIOCSCOMPRESS, (unsigned long) odata);
}

#ifdef CONFIG_BLOCK
@@ -512,12 +533,13 @@ struct mtpos32 {
};
#define MTIOCPOS32 _IOR('m', 3, struct mtpos32)

static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, void __user *argp)
static int mt_ioctl_trans(struct file *file,
unsigned int cmd, void __user *argp)
{
mm_segment_t old_fs = get_fs();
struct mtget get;
/* NULL initialization to make gcc shut up */
struct mtget __user *get = NULL;
struct mtget32 __user *umget32;
struct mtpos pos;
struct mtpos __user *pos = NULL;
struct mtpos32 __user *upos32;
unsigned long kcmd;
void *karg;
@@ -526,32 +548,34 @@ static int mt_ioctl_trans(unsigned int fd, unsigned int cmd, void __user *argp)
switch(cmd) {
case MTIOCPOS32:
kcmd = MTIOCPOS;
karg = &pos;
pos = compat_alloc_user_space(sizeof(*pos));
karg = pos;
break;
default: /* MTIOCGET32 */
kcmd = MTIOCGET;
karg = &get;
get = compat_alloc_user_space(sizeof(*get));
karg = get;
break;
}
set_fs (KERNEL_DS);
err = sys_ioctl (fd, kcmd, (unsigned long)karg);
set_fs (old_fs);
if (karg == NULL)
return -EFAULT;
err = do_ioctl(file, kcmd, (unsigned long)karg);
if (err)
return err;
switch (cmd) {
case MTIOCPOS32:
upos32 = argp;
err = __put_user(pos.mt_blkno, &upos32->mt_blkno);
err = convert_in_user(&pos->mt_blkno, &upos32->mt_blkno);
break;
case MTIOCGET32:
umget32 = argp;
err = __put_user(get.mt_type, &umget32->mt_type);
err |= __put_user(get.mt_resid, &umget32->mt_resid);
err |= __put_user(get.mt_dsreg, &umget32->mt_dsreg);
err |= __put_user(get.mt_gstat, &umget32->mt_gstat);
err |= __put_user(get.mt_erreg, &umget32->mt_erreg);
err |= __put_user(get.mt_fileno, &umget32->mt_fileno);
err |= __put_user(get.mt_blkno, &umget32->mt_blkno);
err = convert_in_user(&get->mt_type, &umget32->mt_type);
err |= convert_in_user(&get->mt_resid, &umget32->mt_resid);
err |= convert_in_user(&get->mt_dsreg, &umget32->mt_dsreg);
err |= convert_in_user(&get->mt_gstat, &umget32->mt_gstat);
err |= convert_in_user(&get->mt_erreg, &umget32->mt_erreg);
err |= convert_in_user(&get->mt_fileno, &umget32->mt_fileno);
err |= convert_in_user(&get->mt_blkno, &umget32->mt_blkno);
break;
}
return err ? -EFAULT: 0;
@@ -605,42 +629,41 @@ struct serial_struct32 {
compat_int_t reserved[1];
};

static int serial_struct_ioctl(unsigned fd, unsigned cmd,
struct serial_struct32 __user *ss32)
static int serial_struct_ioctl(struct file *file,
unsigned cmd, struct serial_struct32 __user *ss32)
{
typedef struct serial_struct32 SS32;
int err;
struct serial_struct ss;
mm_segment_t oldseg = get_fs();
struct serial_struct __user *ss = compat_alloc_user_space(sizeof(*ss));
__u32 udata;
unsigned int base;
unsigned char *iomem_base;

if (ss == NULL)
return -EFAULT;
if (cmd == TIOCSSERIAL) {
if (!access_ok(VERIFY_READ, ss32, sizeof(SS32)))
return -EFAULT;
if (__copy_from_user(&ss, ss32, offsetof(SS32, iomem_base)))
if (copy_in_user(ss, ss32, offsetof(SS32, iomem_base)) ||
get_user(udata, &ss32->iomem_base))
return -EFAULT;
if (__get_user(udata, &ss32->iomem_base))
iomem_base = compat_ptr(udata);
if (put_user(iomem_base, &ss->iomem_base) ||
convert_in_user(&ss32->iomem_reg_shift,
&ss->iomem_reg_shift) ||
convert_in_user(&ss32->port_high, &ss->port_high) ||
put_user(0UL, &ss->iomap_base))
return -EFAULT;
ss.iomem_base = compat_ptr(udata);
if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
__get_user(ss.port_high, &ss32->port_high))
return -EFAULT;
ss.iomap_base = 0UL;
}
set_fs(KERNEL_DS);
err = sys_ioctl(fd,cmd,(unsigned long)(&ss));
set_fs(oldseg);
err = do_ioctl(file, cmd, (unsigned long)ss);
if (cmd == TIOCGSERIAL && err >= 0) {
if (!access_ok(VERIFY_WRITE, ss32, sizeof(SS32)))
return -EFAULT;
if (__copy_to_user(ss32,&ss,offsetof(SS32,iomem_base)))
if (copy_in_user(ss32, ss, offsetof(SS32, iomem_base)) ||
get_user(iomem_base, &ss->iomem_base))
return -EFAULT;
base = (unsigned long)ss.iomem_base >> 32 ?
0xffffffff : (unsigned)(unsigned long)ss.iomem_base;
if (__put_user(base, &ss32->iomem_base) ||
__put_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
__put_user(ss.port_high, &ss32->port_high))
base = (unsigned long)iomem_base >> 32 ?
0xffffffff : (unsigned)(unsigned long)iomem_base;
if (put_user(base, &ss32->iomem_base) ||
convert_in_user(&ss->iomem_reg_shift,
&ss32->iomem_reg_shift) ||
convert_in_user(&ss->port_high, &ss32->port_high))
return -EFAULT;
}
return err;
@@ -674,8 +697,8 @@ struct i2c_rdwr_aligned {
struct i2c_msg msgs[0];
};

static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
struct i2c_rdwr_ioctl_data32 __user *udata)
static int do_i2c_rdwr_ioctl(struct file *file,
unsigned int cmd, struct i2c_rdwr_ioctl_data32 __user *udata)
{
struct i2c_rdwr_aligned __user *tdata;
struct i2c_msg __user *tmsgs;
@@ -708,11 +731,11 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
put_user(compat_ptr(datap), &tmsgs[i].buf))
return -EFAULT;
}
return sys_ioctl(fd, cmd, (unsigned long)tdata);
return do_ioctl(file, cmd, (unsigned long)tdata);
}

static int do_i2c_smbus_ioctl(unsigned int fd, unsigned int cmd,
struct i2c_smbus_ioctl_data32 __user *udata)
static int do_i2c_smbus_ioctl(struct file *file,
unsigned int cmd, struct i2c_smbus_ioctl_data32 __user *udata)
{
struct i2c_smbus_ioctl_data __user *tdata;
compat_caddr_t datap;
@@ -734,7 +757,7 @@ static int do_i2c_smbus_ioctl(unsigned int fd, unsigned int cmd,
__put_user(compat_ptr(datap), &tdata->data))
return -EFAULT;

return sys_ioctl(fd, cmd, (unsigned long)tdata);
return do_ioctl(file, cmd, (unsigned long)tdata);
}

#define RTC_IRQP_READ32 _IOR('p', 0x0b, compat_ulong_t)
@@ -742,29 +765,27 @@ static int do_i2c_smbus_ioctl(unsigned int fd, unsigned int cmd,
#define RTC_EPOCH_READ32 _IOR('p', 0x0d, compat_ulong_t)
#define RTC_EPOCH_SET32 _IOW('p', 0x0e, compat_ulong_t)

static int rtc_ioctl(unsigned fd, unsigned cmd, void __user *argp)
static int rtc_ioctl(struct file *file,
unsigned cmd, void __user *argp)
{
mm_segment_t oldfs = get_fs();
compat_ulong_t val32;
unsigned long kval;
unsigned long __user *valp = compat_alloc_user_space(sizeof(*valp));
int ret;

if (valp == NULL)
return -EFAULT;
switch (cmd) {
case RTC_IRQP_READ32:
case RTC_EPOCH_READ32:
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, (cmd == RTC_IRQP_READ32) ?
ret = do_ioctl(file, (cmd == RTC_IRQP_READ32) ?
RTC_IRQP_READ : RTC_EPOCH_READ,
(unsigned long)&kval);
set_fs(oldfs);
(unsigned long)valp);
if (ret)
return ret;
val32 = kval;
return put_user(val32, (unsigned int __user *)argp);
return convert_in_user(valp, (unsigned int __user *)argp);
case RTC_IRQP_SET32:
return sys_ioctl(fd, RTC_IRQP_SET, (unsigned long)argp);
return do_ioctl(file, RTC_IRQP_SET, (unsigned long)argp);
case RTC_EPOCH_SET32:
return sys_ioctl(fd, RTC_EPOCH_SET, (unsigned long)argp);
return do_ioctl(file, RTC_EPOCH_SET, (unsigned long)argp);
}

return -ENOIOCTLCMD;
@@ -1436,53 +1457,53 @@ IGNORE_IOCTL(FBIOGCURSOR32)
 * a compat_ioctl operation in the place that handles the
 * ioctl for the native case.
 */
static long do_ioctl_trans(int fd, unsigned int cmd,
static long do_ioctl_trans(unsigned int cmd,
unsigned long arg, struct file *file)
{
void __user *argp = compat_ptr(arg);

switch (cmd) {
case PPPIOCGIDLE32:
return ppp_gidle(fd, cmd, argp);
return ppp_gidle(file, cmd, argp);
case PPPIOCSCOMPRESS32:
return ppp_scompress(fd, cmd, argp);
return ppp_scompress(file, cmd, argp);
case PPPIOCSPASS32:
case PPPIOCSACTIVE32:
return ppp_sock_fprog_ioctl_trans(fd, cmd, argp);
return ppp_sock_fprog_ioctl_trans(file, cmd, argp);
#ifdef CONFIG_BLOCK
case SG_IO:
return sg_ioctl_trans(fd, cmd, argp);
return sg_ioctl_trans(file, cmd, argp);
case SG_GET_REQUEST_TABLE:
return sg_grt_trans(fd, cmd, argp);
return sg_grt_trans(file, cmd, argp);
case MTIOCGET32:
case MTIOCPOS32:
return mt_ioctl_trans(fd, cmd, argp);
return mt_ioctl_trans(file, cmd, argp);
#endif
/* Serial */
case TIOCGSERIAL:
case TIOCSSERIAL:
return serial_struct_ioctl(fd, cmd, argp);
return serial_struct_ioctl(file, cmd, argp);
/* i2c */
case I2C_FUNCS:
return w_long(fd, cmd, argp);
return w_long(file, cmd, argp);
case I2C_RDWR:
return do_i2c_rdwr_ioctl(fd, cmd, argp);
return do_i2c_rdwr_ioctl(file, cmd, argp);
case I2C_SMBUS:
return do_i2c_smbus_ioctl(fd, cmd, argp);
return do_i2c_smbus_ioctl(file, cmd, argp);
/* Not implemented in the native kernel */
case RTC_IRQP_READ32:
case RTC_IRQP_SET32:
case RTC_EPOCH_READ32:
case RTC_EPOCH_SET32:
return rtc_ioctl(fd, cmd, argp);
return rtc_ioctl(file, cmd, argp);

/* dvb */
case VIDEO_GET_EVENT:
return do_video_get_event(fd, cmd, argp);
return do_video_get_event(file, cmd, argp);
case VIDEO_STILLPICTURE:
return do_video_stillpicture(fd, cmd, argp);
return do_video_stillpicture(file, cmd, argp);
case VIDEO_SET_SPU_PALETTE:
return do_video_set_spu_palette(fd, cmd, argp);
return do_video_set_spu_palette(file, cmd, argp);
}

/*
@@ -1513,7 +1534,7 @@ static long do_ioctl_trans(int fd, unsigned int cmd,
case NBD_SET_BLKSIZE:
case NBD_SET_SIZE:
case NBD_SET_SIZE_BLOCKS:
return do_vfs_ioctl(file, fd, cmd, arg);
return vfs_ioctl(file, cmd, arg);
}

return -ENOIOCTLCMD;
@@ -1602,7 +1623,7 @@ COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
if (compat_ioctl_check_table(XFORM(cmd)))
goto found_handler;

error = do_ioctl_trans(fd, cmd, arg, f.file);
error = do_ioctl_trans(cmd, arg, f.file);
if (error == -ENOIOCTLCMD)
error = -ENOTTY;

@@ -151,3 +151,10 @@ extern void mnt_pin_kill(struct mount *m);
 * fs/nsfs.c
 */
extern struct dentry_operations ns_dentry_operations;

/*
 * fs/ioctl.c
 */
extern int do_vfs_ioctl(struct file *file, unsigned int fd, unsigned int cmd,
unsigned long arg);
extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

@@ -15,6 +15,7 @@
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/falloc.h>
#include "internal.h"

#include <asm/ioctls.h>

@@ -32,8 +33,7 @@
 *
 * Returns 0 on success, -errno on error.
 */
static long vfs_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int error = -ENOTTY;

@@ -2843,6 +2843,8 @@ static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
if (!ret)
BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
else
res->migration_pending = 0;
spin_unlock(&res->spinlock);

/*

@@ -67,7 +67,10 @@ static int ocfs2_do_flock(struct file *file, struct inode *inode,
 */

locks_lock_file_wait(file,
&(struct file_lock){.fl_type = F_UNLCK});
&(struct file_lock) {
.fl_type = F_UNLCK,
.fl_flags = FL_FLOCK
});

ocfs2_file_unlock(file);
}

@@ -54,11 +54,12 @@
static u16 ocfs2_calc_new_backup_super(struct inode *inode,
struct ocfs2_group_desc *gd,
u16 cl_cpg,
u16 old_bg_clusters,
int set)
{
int i;
u16 backups = 0;
u32 cluster;
u32 cluster, lgd_cluster;
u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);

for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
@@ -71,6 +72,12 @@ static u16 ocfs2_calc_new_backup_super(struct inode *inode,
else if (gd_blkno > lgd_blkno)
break;

/* check if already done backup super */
lgd_cluster = ocfs2_blocks_to_clusters(inode->i_sb, lgd_blkno);
lgd_cluster += old_bg_clusters;
if (lgd_cluster >= cluster)
continue;

if (set)
ocfs2_set_bit(cluster % cl_cpg,
(unsigned long *)gd->bg_bitmap);
@@ -99,6 +106,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
u16 chain, num_bits, backups = 0;
u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
u16 old_bg_clusters;

trace_ocfs2_update_last_group_and_inode(new_clusters,
first_new_cluster);
@@ -112,6 +120,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,

group = (struct ocfs2_group_desc *)group_bh->b_data;

old_bg_clusters = le16_to_cpu(group->bg_bits) / cl_bpc;
/* update the group first. */
num_bits = new_clusters * cl_bpc;
le16_add_cpu(&group->bg_bits, num_bits);
@@ -125,7 +134,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
backups = ocfs2_calc_new_backup_super(bm_inode,
group,
cl_cpg, 1);
cl_cpg, old_bg_clusters, 1);
le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
}

@@ -163,7 +172,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
if (ret < 0) {
ocfs2_calc_new_backup_super(bm_inode,
group,
cl_cpg, 0);
cl_cpg, old_bg_clusters, 0);
le16_add_cpu(&group->bg_free_bits_count, backups);
le16_add_cpu(&group->bg_bits, -1 * num_bits);
le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);

@@ -797,6 +797,7 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);

@@ -2766,8 +2766,6 @@ extern int vfs_lstat(const char __user *, struct kstat *);
extern int vfs_fstat(unsigned int, struct kstat *);
extern int vfs_fstatat(int , const char __user *, struct kstat *, int);

extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
unsigned long arg);
extern int __generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
loff_t start, loff_t len,

@@ -138,6 +138,7 @@ struct cdc_ncm_ctx {
};

u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
int cdc_ncm_change_mtu(struct net_device *net, int new_mtu);
int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags);
void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);

@@ -176,11 +176,11 @@ extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

@@ -205,7 +205,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
enum zone_stat_item item, int delta)
enum zone_stat_item item, long delta)
{
zone_page_state_add(delta, zone, item);
}

@@ -519,7 +519,8 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
return -ENOMEM;

spin_lock(&ht->lock);
iter->walker->tbl = rht_dereference(ht->tbl, ht);
iter->walker->tbl =
rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
list_add(&iter->walker->list, &iter->walker->tbl->walkers);
spin_unlock(&ht->lock);

@@ -903,14 +903,20 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
if (prev && reclaim->generation != iter->generation)
goto out_unlock;

do {
while (1) {
pos = READ_ONCE(iter->position);
if (!pos || css_tryget(&pos->css))
break;
/*
 * A racing update may change the position and
 * put the last reference, hence css_tryget(),
 * or retry to see the updated position.
 * css reference reached zero, so iter->position will
 * be cleared by ->css_released. However, we should not
 * rely on this happening soon, because ->css_released
 * is called from a work queue, and by busy-waiting we
 * might block it. So we clear iter->position right
 * away.
 */
} while (pos && !css_tryget(&pos->css));
(void)cmpxchg(&iter->position, pos, NULL);
}
}

if (pos)
@@ -956,17 +962,13 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
}

if (reclaim) {
if (cmpxchg(&iter->position, pos, memcg) == pos) {
if (memcg)
css_get(&memcg->css);
if (pos)
css_put(&pos->css);
}

/*
 * pairs with css_tryget when dereferencing iter->position
 * above.
 * The position could have already been updated by a competing
 * thread, so check that the value hasn't changed since we read
 * it to avoid reclaiming from the same cgroup twice.
 */
(void)cmpxchg(&iter->position, pos, memcg);

if (pos)
css_put(&pos->css);

@@ -999,6 +1001,28 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
struct mem_cgroup *memcg = dead_memcg;
struct mem_cgroup_reclaim_iter *iter;
struct mem_cgroup_per_zone *mz;
int nid, zid;
int i;

while ((memcg = parent_mem_cgroup(memcg))) {
for_each_node(nid) {
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
for (i = 0; i <= DEF_PRIORITY; i++) {
iter = &mz->iter[i];
cmpxchg(&iter->position,
dead_memcg, NULL);
}
}
}
}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree). If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
@@ -4324,6 +4348,13 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
wb_memcg_offline(memcg);
}

static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);

invalidate_reclaim_iterators(memcg);
}

static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
@@ -5185,6 +5216,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
.css_alloc = mem_cgroup_css_alloc,
.css_online = mem_cgroup_css_online,
.css_offline = mem_cgroup_css_offline,
.css_released = mem_cgroup_css_released,
.css_free = mem_cgroup_css_free,
.css_reset = mem_cgroup_css_reset,
.can_attach = mem_cgroup_can_attach,

@@ -1375,23 +1375,30 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 */
int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long pfn;
unsigned long pfn, sec_end_pfn;
struct zone *zone = NULL;
struct page *page;
int i;
for (pfn = start_pfn;
for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn);
pfn < end_pfn;
pfn += MAX_ORDER_NR_PAGES) {
i = 0;
/* This is just a CONFIG_HOLES_IN_ZONE check.*/
while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
i++;
if (i == MAX_ORDER_NR_PAGES)
pfn = sec_end_pfn + 1, sec_end_pfn += PAGES_PER_SECTION) {
/* Make sure the memory section is present first */
if (!present_section_nr(pfn_to_section_nr(pfn)))
continue;
page = pfn_to_page(pfn + i);
if (zone && page_zone(page) != zone)
return 0;
zone = page_zone(page);
for (; pfn < sec_end_pfn && pfn < end_pfn;
pfn += MAX_ORDER_NR_PAGES) {
i = 0;
/* This is just a CONFIG_HOLES_IN_ZONE check.*/
while ((i < MAX_ORDER_NR_PAGES) &&
!pfn_valid_within(pfn + i))
i++;
if (i == MAX_ORDER_NR_PAGES)
continue;
page = pfn_to_page(pfn + i);
if (zone && page_zone(page) != zone)
return 0;
zone = page_zone(page);
}
}
return 1;
}

mm/vmstat.c
@@ -219,7 +219,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
int delta)
long delta)
{
struct per_cpu_pageset __percpu *pcp = zone->pageset;
s8 __percpu *p = pcp->vm_stat_diff + item;
@@ -318,8 +318,8 @@ EXPORT_SYMBOL(__dec_zone_page_state);
 * 1 Overstepping half of threshold
 * -1 Overstepping minus half of threshold
 */
static inline void mod_state(struct zone *zone,
enum zone_stat_item item, int delta, int overstep_mode)
static inline void mod_state(struct zone *zone, enum zone_stat_item item,
long delta, int overstep_mode)
{
struct per_cpu_pageset __percpu *pcp = zone->pageset;
s8 __percpu *p = pcp->vm_stat_diff + item;
@@ -357,7 +357,7 @@ static inline void mod_state(struct zone *zone,
}

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
int delta)
long delta)
{
mod_state(zone, item, delta, 0);
}
@@ -384,7 +384,7 @@ EXPORT_SYMBOL(dec_zone_page_state);
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
int delta)
long delta)
{
unsigned long flags;

@@ -39,7 +39,7 @@ void br_init_port(struct net_bridge_port *p)
struct switchdev_attr attr = {
.id = SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME,
.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP | SWITCHDEV_F_DEFER,
.u.ageing_time = p->br->ageing_time,
.u.ageing_time = jiffies_to_clock_t(p->br->ageing_time),
};
int err;

@@ -253,9 +253,6 @@ ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)

p.i_key = p.o_key = 0;
p.i_flags = p.o_flags = 0;
if (p.iph.ttl)
p.iph.frag_off |= htons(IP_DF);

err = ip_tunnel_ioctl(dev, &p, cmd);
if (err)
return err;

@@ -259,7 +259,7 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
xfrm_dst_ifdown(dst, dev);
}

static struct dst_ops xfrm4_dst_ops = {
static struct dst_ops xfrm4_dst_ops_template = {
.family = AF_INET,
.gc = xfrm4_garbage_collect,
.update_pmtu = xfrm4_update_pmtu,
@@ -273,7 +273,7 @@ static struct dst_ops xfrm4_dst_ops = {

static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
.family = AF_INET,
.dst_ops = &xfrm4_dst_ops,
.dst_ops = &xfrm4_dst_ops_template,
.dst_lookup = xfrm4_dst_lookup,
.get_saddr = xfrm4_get_saddr,
.decode_session = _decode_session4,
@@ -295,7 +295,7 @@ static struct ctl_table xfrm4_policy_table[] = {
{ }
};

static int __net_init xfrm4_net_init(struct net *net)
static int __net_init xfrm4_net_sysctl_init(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
@@ -323,7 +323,7 @@ static int __net_init xfrm4_net_init(struct net *net)
return -ENOMEM;
}

static void __net_exit xfrm4_net_exit(struct net *net)
static void __net_exit xfrm4_net_sysctl_exit(struct net *net)
{
struct ctl_table *table;

@@ -335,12 +335,44 @@ static void __net_exit xfrm4_net_exit(struct net *net)
if (!net_eq(net, &init_net))
kfree(table);
}
#else /* CONFIG_SYSCTL */
static int inline xfrm4_net_sysctl_init(struct net *net)
{
return 0;
}

static void inline xfrm4_net_sysctl_exit(struct net *net)
{
}
#endif

static int __net_init xfrm4_net_init(struct net *net)
{
int ret;

memcpy(&net->xfrm.xfrm4_dst_ops, &xfrm4_dst_ops_template,
sizeof(xfrm4_dst_ops_template));
ret = dst_entries_init(&net->xfrm.xfrm4_dst_ops);
if (ret)
return ret;

ret = xfrm4_net_sysctl_init(net);
if (ret)
dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);

return ret;
}

static void __net_exit xfrm4_net_exit(struct net *net)
{
xfrm4_net_sysctl_exit(net);
dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
}

static struct pernet_operations __net_initdata xfrm4_net_ops = {
.init = xfrm4_net_init,
.exit = xfrm4_net_exit,
};
#endif

static void __init xfrm4_policy_init(void)
{
@@ -349,13 +381,9 @@ static void __init xfrm4_policy_init(void)

void __init xfrm4_init(void)
{
dst_entries_init(&xfrm4_dst_ops);

xfrm4_state_init();
xfrm4_policy_init();
xfrm4_protocol_init();
#ifdef CONFIG_SYSCTL
register_pernet_subsys(&xfrm4_net_ops);
#endif
}

@@ -5369,13 +5369,10 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
goto out;
}

if (!write) {
err = snprintf(str, sizeof(str), "%pI6",
&secret->secret);
if (err >= sizeof(str)) {
err = -EIO;
goto out;
}
err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
if (err >= sizeof(str)) {
err = -EIO;
goto out;
}

err = proc_dostring(&lctl, write, buffer, lenp, ppos);

@@ -552,7 +552,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh)

rcu_read_lock();
p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
if (p && ip6addrlbl_hold(p))
if (p && !ip6addrlbl_hold(p))
p = NULL;
lseq = ip6addrlbl_table.seq;
rcu_read_unlock();

@@ -1183,7 +1183,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 */
if (!in6_dev->cnf.accept_ra_from_local &&
ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
NULL, 0)) {
in6_dev->dev, 0)) {
ND_PRINTK(2, info,
"RA from local address detected on dev: %s: default router ignored\n",
skb->dev->name);
@@ -1337,7 +1337,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
#ifdef CONFIG_IPV6_ROUTE_INFO
if (!in6_dev->cnf.accept_ra_from_local &&
ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
NULL, 0)) {
in6_dev->dev, 0)) {
ND_PRINTK(2, info,
"RA from local address detected on dev: %s: router info ignored.\n",
skb->dev->name);

@@ -279,7 +279,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
xfrm_dst_ifdown(dst, dev);
}

static struct dst_ops xfrm6_dst_ops = {
static struct dst_ops xfrm6_dst_ops_template = {
.family = AF_INET6,
.gc = xfrm6_garbage_collect,
.update_pmtu = xfrm6_update_pmtu,
@@ -293,7 +293,7 @@ static struct dst_ops xfrm6_dst_ops = {

static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
.family = AF_INET6,
.dst_ops = &xfrm6_dst_ops,
.dst_ops = &xfrm6_dst_ops_template,
.dst_lookup = xfrm6_dst_lookup,
.get_saddr = xfrm6_get_saddr,
.decode_session = _decode_session6,
@@ -325,7 +325,7 @@ static struct ctl_table xfrm6_policy_table[] = {
{ }
};

static int __net_init xfrm6_net_init(struct net *net)
static int __net_init xfrm6_net_sysctl_init(struct net *net)
{
struct ctl_table *table;
struct ctl_table_header *hdr;
@@ -353,7 +353,7 @@ static int __net_init xfrm6_net_init(struct net *net)
return -ENOMEM;
}

static void __net_exit xfrm6_net_exit(struct net *net)
static void __net_exit xfrm6_net_sysctl_exit(struct net *net)
{
struct ctl_table *table;

@@ -365,24 +365,52 @@ static void __net_exit xfrm6_net_exit(struct net *net)
if (!net_eq(net, &init_net))
kfree(table);
}
#else /* CONFIG_SYSCTL */
static int inline xfrm6_net_sysctl_init(struct net *net)
{
return 0;
}

static void inline xfrm6_net_sysctl_exit(struct net *net)
{
}
#endif

static int __net_init xfrm6_net_init(struct net *net)
{
int ret;

memcpy(&net->xfrm.xfrm6_dst_ops, &xfrm6_dst_ops_template,
sizeof(xfrm6_dst_ops_template));
ret = dst_entries_init(&net->xfrm.xfrm6_dst_ops);
if (ret)
return ret;

ret = xfrm6_net_sysctl_init(net);
if (ret)
dst_entries_destroy(&net->xfrm.xfrm6_dst_ops);

return ret;
}

static void __net_exit xfrm6_net_exit(struct net *net)
{
xfrm6_net_sysctl_exit(net);
dst_entries_destroy(&net->xfrm.xfrm6_dst_ops);
}

static struct pernet_operations xfrm6_net_ops = {
.init = xfrm6_net_init,
.exit = xfrm6_net_exit,
};
#endif

int __init xfrm6_init(void)
{
int ret;

dst_entries_init(&xfrm6_dst_ops);

ret = xfrm6_policy_init();
if (ret) {
dst_entries_destroy(&xfrm6_dst_ops);
if (ret)
goto out;
}
ret = xfrm6_state_init();
if (ret)
goto out_policy;
@@ -391,9 +419,7 @@ int __init xfrm6_init(void)
if (ret)
goto out_state;

#ifdef CONFIG_SYSCTL
register_pernet_subsys(&xfrm6_net_ops);
#endif
out:
return ret;
out_state:
@@ -405,11 +431,8 @@ int __init xfrm6_init(void)

void xfrm6_fini(void)
{
#ifdef CONFIG_SYSCTL
unregister_pernet_subsys(&xfrm6_net_ops);
#endif
xfrm6_protocol_fini();
xfrm6_policy_fini();
xfrm6_state_fini();
dst_entries_destroy(&xfrm6_dst_ops);
}

@@ -94,7 +94,7 @@ nft_do_chain_netdev(void *priv, struct sk_buff *skb,
{
struct nft_pktinfo pkt;

switch (eth_hdr(skb)->h_proto) {
switch (skb->protocol) {
case htons(ETH_P_IP):
nft_netdev_set_pktinfo_ipv4(&pkt, skb, state);
break;

@@ -366,6 +366,7 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
goto nla_put_failure;

switch (priv->key) {
case NFT_CT_L3PROTOCOL:
case NFT_CT_PROTOCOL:
case NFT_CT_SRC:
case NFT_CT_DST:

@@ -698,6 +698,10 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
OVS_NLERR(log, "Failed to allocate conntrack template");
return -ENOMEM;
}

__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
nf_conntrack_get(&ct_info.ct->ct_general);

if (helper) {
err = ovs_ct_add_helper(&ct_info, helper, key, log);
if (err)
@@ -709,8 +713,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
if (err)
goto err_free_ct;

__set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
nf_conntrack_get(&ct_info.ct->ct_general);
return 0;
err_free_ct:
__ovs_ct_free_action(&ct_info);

@@ -2434,7 +2434,10 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
if (!start)
return -EMSGSIZE;

err = ovs_nla_put_tunnel_info(skb, tun_info);
err = ip_tun_to_nlattr(skb, &tun_info->key,
ip_tunnel_info_opts(tun_info),
tun_info->options_len,
ip_tunnel_info_af(tun_info));
if (err)
return err;
nla_nest_end(skb, start);

@@ -4829,7 +4829,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(

retval = SCTP_DISPOSITION_CONSUME;

sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
if (abort)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));

/* Even if we can't send the ABORT due to low memory delete the
 * TCB. This is a departure from our typical NOMEM handling.
@@ -4966,7 +4967,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
retval = SCTP_DISPOSITION_CONSUME;

sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
if (abort)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));

sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));


@@ -1301,8 +1301,9 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
				       int addrs_size,
				       sctp_assoc_t *assoc_id)
{
	int err = 0;
	struct sockaddr *kaddrs;
	gfp_t gfp = GFP_KERNEL;
	int err = 0;

	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
		 __func__, sk, addrs, addrs_size);

@@ -1315,7 +1316,9 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
		return -EFAULT;

	/* Alloc space for the address array in kernel memory. */
	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
	if (sk->sk_socket->file)
		gfp = GFP_USER | __GFP_NOWARN;
	kaddrs = kmalloc(addrs_size, gfp);
	if (unlikely(!kaddrs))
		return -ENOMEM;
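addrs_size is supplied by userspace, so the hunk above picks GFP_USER | __GFP_NOWARN for the copy buffer when the socket has a file attached (a later hunk does the same in sctp_getsockopt_assoc_ids()): an oversized request then simply fails with -ENOMEM instead of tripping a page-allocation warning. A self-contained sketch of that pattern follows; the helper name is invented and is not code from this commit.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Illustrative helper: copy a user-supplied, user-sized buffer into the
 * kernel.  GFP_USER | __GFP_NOWARN lets a huge requested size fail the
 * allocation quietly rather than emit a warning.
 */
static void *example_copyin(const void __user *uaddr, size_t size)
{
	void *buf;

	buf = kmalloc(size, GFP_USER | __GFP_NOWARN);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(buf, uaddr, size)) {
		kfree(buf);
		return ERR_PTR(-EFAULT);
	}

	return buf;
}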

@@ -1513,8 +1516,7 @@ static void sctp_close(struct sock *sk, long timeout)
			struct sctp_chunk *chunk;

			chunk = sctp_make_abort_user(asoc, NULL, 0);
			if (chunk)
				sctp_primitive_ABORT(net, asoc, chunk);
			sctp_primitive_ABORT(net, asoc, chunk);
		} else
			sctp_primitive_SHUTDOWN(net, asoc, NULL);
	}

@@ -5773,7 +5775,7 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,

	len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;

	ids = kmalloc(len, GFP_KERNEL);
	ids = kmalloc(len, GFP_USER | __GFP_NOWARN);
	if (unlikely(!ids))
		return -ENOMEM;

@@ -7199,6 +7201,8 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,

	if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
		net_enable_timestamp();

	security_sk_clone(sk, newsk);
}

static inline void sctp_copy_descendant(struct sock *sk_to,

@@ -257,6 +257,7 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
	}
	init_waitqueue_head(&wq->wait);
	wq->fasync_list = NULL;
	wq->flags = 0;
	RCU_INIT_POINTER(ei->socket.wq, wq);

	ei->socket.state = SS_UNCONNECTED;

@@ -2826,7 +2826,6 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	struct net *net;
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;

@@ -2857,26 +2856,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	rtnl_lock();
	for_each_net(net) {
		struct dst_ops *xfrm_dst_ops;

		switch (afinfo->family) {
		case AF_INET:
			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
			break;
#endif
		default:
			BUG();
		}
		*xfrm_dst_ops = *afinfo->dst_ops;
	}
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

@@ -2912,22 +2891,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static void __net_init xfrm_dst_ops_init(struct net *net)
{
	struct xfrm_policy_afinfo *afinfo;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
	if (afinfo)
		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if IS_ENABLED(CONFIG_IPV6)
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
	if (afinfo)
		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
	rcu_read_unlock();
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

@@ -3076,7 +3039,6 @@ static int __net_init xfrm_net_init(struct net *net)
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

@@ -751,16 +751,16 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)

	/* the key is probably readable - now try to read it */
can_read_key:
	ret = key_validate(key);
	if (ret == 0) {
		ret = -EOPNOTSUPP;
		if (key->type->read) {
			/* read the data with the semaphore held (since we
			 * might sleep) */
			down_read(&key->sem);
	ret = -EOPNOTSUPP;
	if (key->type->read) {
		/* Read the data with the semaphore held (since we might sleep)
		 * to protect against the key being updated or revoked.
		 */
		down_read(&key->sem);
		ret = key_validate(key);
		if (ret == 0)
			ret = key->type->read(key, buffer, buflen);
			up_read(&key->sem);
		}
		up_read(&key->sem);
	}

error2:
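The rewrite above takes key->sem before key_validate(), so the validity check and the ->read() call see the same key state and a concurrent update or revocation cannot slip in between them. A compact sketch of that check-then-read-under-lock shape follows; the helper is invented for illustration, while the calls it makes are the ones used in the hunk.

#include <linux/key.h>
#include <linux/key-type.h>
#include <linux/uaccess.h>

/* Illustrative only: validate and read under the same read-held semaphore
 * so the key cannot change between the two steps.
 */
static long example_read_key(struct key *key, char __user *buffer,
			     size_t buflen)
{
	long ret = -EOPNOTSUPP;

	if (!key->type->read)
		return ret;

	down_read(&key->sem);
	ret = key_validate(key);
	if (ret == 0)
		ret = key->type->read(key, buffer, buflen);
	up_read(&key->sem);

	return ret;
}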