NFS client bugfixes for Linux 5.18
Highlights include:

Stable fixes:
 - SUNRPC: Ensure we flush any closed sockets before xs_xprt_free()

Bugfixes:
 - Fix an Oopsable condition due to the SLAB_ACCOUNT setting in the NFSv4.2 xattr code
 - Fix for open() using a file open mode of '3' in NFSv4
 - Replace readdir's use of xxhash() with hash_64()
 - Several patches to handle malloc() failure in SUNRPC

Merge tag 'nfs-for-5.18-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client fixes from Trond Myklebust.

* tag 'nfs-for-5.18-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  SUNRPC: Move the call to xprt_send_pagedata() out of xprt_sock_sendmsg()
  SUNRPC: svc_tcp_sendmsg() should handle errors from xdr_alloc_bvec()
  SUNRPC: Handle allocation failure in rpc_new_task()
  NFS: Ensure rpc_run_task() cannot fail in nfs_async_rename()
  NFSv4/pnfs: Handle RPC allocation errors in nfs4_proc_layoutget
  SUNRPC: Handle low memory situations in call_status()
  SUNRPC: Handle ENOMEM in call_transmit_status()
  NFSv4.2: Fix missing removal of SLAB_ACCOUNT on kmem_cache allocation
  SUNRPC: Ensure we flush any closed sockets before xs_xprt_free()
  NFS: Replace readdir's use of xxhash() with hash_64()
  SUNRPC: handle malloc failure in ->request_prepare
  NFSv4: fix open failure with O_ACCMODE flag
  Revert "NFSv4: Handle the special Linux file open access mode"
commit 1a3b1bba7c
@@ -412,6 +412,7 @@ void __fput_sync(struct file *file)
 }
 
 EXPORT_SYMBOL(fput);
+EXPORT_SYMBOL(__fput_sync);
 
 void __init files_init(void)
 {
@@ -4,10 +4,6 @@ config NFS_FS
	depends on INET && FILE_LOCKING && MULTIUSER
	select LOCKD
	select SUNRPC
-	select CRYPTO
-	select CRYPTO_HASH
-	select XXHASH
-	select CRYPTO_XXHASH
	select NFS_ACL_SUPPORT if NFS_V3_ACL
	help
	  Choose Y here if you want to access files residing on other
fs/nfs/dir.c
@@ -39,7 +39,7 @@
 #include <linux/sched.h>
 #include <linux/kmemleak.h>
 #include <linux/xattr.h>
-#include <linux/xxhash.h>
+#include <linux/hash.h>
 
 #include "delegation.h"
 #include "iostat.h"
@@ -350,10 +350,7 @@ static int nfs_readdir_page_array_append(struct page *page,
  * of directory cookies. Content is addressed by the value of the
  * cookie index of the first readdir entry in a page.
  *
- * The xxhash algorithm is chosen because it is fast, and is supposed
- * to result in a decent flat distribution of hashes.
- *
- * We then select only the first 18 bits to avoid issues with excessive
+ * We select only the first 18 bits to avoid issues with excessive
  * memory use for the page cache XArray. 18 bits should allow the caching
  * of 262144 pages of sequences of readdir entries. Since each page holds
  * 127 readdir entries for a typical 64-bit system, that works out to a
@@ -363,7 +360,7 @@ static pgoff_t nfs_readdir_page_cookie_hash(u64 cookie)
 {
	if (cookie == 0)
		return 0;
-	return xxhash(&cookie, sizeof(cookie), 0) & NFS_READDIR_COOKIE_MASK;
+	return hash_64(cookie, 18);
 }
 
 static bool nfs_readdir_page_validate(struct page *page, u64 last_cookie,
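The replacement page-index calculation above is just a 64-bit multiplicative hash reduced to 18 bits. A small userspace sketch of the same mapping follows; the golden-ratio constant is assumed to match the kernel's hash_64(), and the program itself is illustrative, not part of the change:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match the kernel's GOLDEN_RATIO_64 used by hash_64(). */
#define GOLDEN_RATIO_64 0x61C8864680B583EBull

/* hash_64(cookie, 18): multiply, then keep the top 18 bits as the page index. */
static uint64_t cookie_to_page_index(uint64_t cookie)
{
	if (cookie == 0)
		return 0;
	return (cookie * GOLDEN_RATIO_64) >> (64 - 18);
}

int main(void)
{
	uint64_t cookies[] = { 1, 2, 0x1234567890abcdefull };

	/* An 18-bit result indexes at most 262144 page-cache pages, matching
	 * the comment in the fs/nfs/dir.c hunk above. */
	for (int i = 0; i < 3; i++)
		printf("cookie %llu -> page index %llu\n",
		       (unsigned long long)cookies[i],
		       (unsigned long long)cookie_to_page_index(cookies[i]));
	return 0;
}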
@@ -1991,16 +1988,6 @@ const struct dentry_operations nfs4_dentry_operations = {
 };
 EXPORT_SYMBOL_GPL(nfs4_dentry_operations);
 
-static fmode_t flags_to_mode(int flags)
-{
-	fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
-	if ((flags & O_ACCMODE) != O_WRONLY)
-		res |= FMODE_READ;
-	if ((flags & O_ACCMODE) != O_RDONLY)
-		res |= FMODE_WRITE;
-	return res;
-}
-
 static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp)
 {
	return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp);
@@ -1180,7 +1180,6 @@ int nfs_open(struct inode *inode, struct file *filp)
	nfs_fscache_open_file(inode, filp);
	return 0;
 }
-EXPORT_SYMBOL_GPL(nfs_open);
 
 /*
  * This function is called whenever some part of NFS notices that
@@ -42,6 +42,16 @@ static inline bool nfs_lookup_is_soft_revalidate(const struct dentry *dentry)
	return true;
 }
 
+static inline fmode_t flags_to_mode(int flags)
+{
+	fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
+	if ((flags & O_ACCMODE) != O_WRONLY)
+		res |= FMODE_READ;
+	if ((flags & O_ACCMODE) != O_RDONLY)
+		res |= FMODE_WRITE;
+	return res;
+}
+
 /*
  * Note: RFC 1813 doesn't limit the number of auth flavors that
  * a server can return, so make something up.
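The flags_to_mode() helper moved into fs/nfs/internal.h above is what the NFSv4 open() fix builds on: any access mode other than O_WRONLY implies read, anything other than O_RDONLY implies write, so the Linux-specific access mode of 3 maps to read plus write. A minimal userspace sketch of that mapping; the FMODE_* values are stand-ins and the exec-bit handling of the real helper is omitted:

#include <fcntl.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's fmode bits. */
#define FMODE_READ  0x1u
#define FMODE_WRITE 0x2u

static unsigned int flags_to_mode(int flags)
{
	unsigned int res = 0;

	if ((flags & O_ACCMODE) != O_WRONLY)
		res |= FMODE_READ;
	if ((flags & O_ACCMODE) != O_RDONLY)
		res |= FMODE_WRITE;
	return res;
}

int main(void)
{
	/* O_RDONLY = 0, O_WRONLY = 1, O_RDWR = 2; 3 is the undefined extra mode. */
	for (int acc = 0; acc <= 3; acc++)
		printf("access mode %d -> fmode 0x%x\n", acc, flags_to_mode(acc));
	return 0;
}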
@@ -997,7 +997,7 @@ int __init nfs4_xattr_cache_init(void)
 
	nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
			sizeof(struct nfs4_xattr_cache), 0,
-			(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+			(SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
			nfs4_xattr_cache_init_once);
	if (nfs4_xattr_cache_cachep == NULL)
		return -ENOMEM;
@@ -32,6 +32,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
	struct dentry *parent = NULL;
	struct inode *dir;
	unsigned openflags = filp->f_flags;
+	fmode_t f_mode;
	struct iattr attr;
	int err;
 
@@ -50,8 +51,9 @@ nfs4_file_open(struct inode *inode, struct file *filp)
	if (err)
		return err;
 
+	f_mode = filp->f_mode;
	if ((openflags & O_ACCMODE) == 3)
-		return nfs_open(inode, filp);
+		f_mode |= flags_to_mode(openflags);
 
	/* We can't create new files here */
	openflags &= ~(O_CREAT|O_EXCL);
@@ -59,7 +61,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
	parent = dget_parent(dentry);
	dir = d_inode(parent);
 
-	ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
+	ctx = alloc_nfs_open_context(file_dentry(filp), f_mode, filp);
	err = PTR_ERR(ctx);
	if (IS_ERR(ctx))
		goto out;
@@ -9615,6 +9615,8 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
	nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
 
	task = rpc_run_task(&task_setup_data);
+	if (IS_ERR(task))
+		return ERR_CAST(task);
 
	status = rpc_wait_for_completion_task(task);
	if (status != 0)
@@ -347,6 +347,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		return ERR_PTR(-ENOMEM);
+	task_setup_data.task = &data->task;
	task_setup_data.callback_data = data;
 
	data->cred = get_current_cred();
@@ -1694,6 +1694,7 @@ struct nfs_unlinkdata {
 struct nfs_renamedata {
	struct nfs_renameargs	args;
	struct nfs_renameres	res;
+	struct rpc_task		task;
	const struct cred	*cred;
	struct inode		*old_dir;
	struct dentry		*old_dentry;
@@ -144,7 +144,7 @@ struct rpc_xprt_ops {
	unsigned short	(*get_srcport)(struct rpc_xprt *xprt);
	int		(*buf_alloc)(struct rpc_task *task);
	void		(*buf_free)(struct rpc_task *task);
-	void		(*prepare_request)(struct rpc_rqst *req);
+	int		(*prepare_request)(struct rpc_rqst *req);
	int		(*send_request)(struct rpc_rqst *req);
	void		(*wait_for_reply_request)(struct rpc_task *task);
	void		(*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -358,10 +358,9 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
 void	xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
 void	xprt_free_slot(struct rpc_xprt *xprt,
		       struct rpc_rqst *req);
-void	xprt_request_prepare(struct rpc_rqst *req);
 bool	xprt_prepare_transmit(struct rpc_task *task);
 void	xprt_request_enqueue_transmit(struct rpc_task *task);
-void	xprt_request_enqueue_receive(struct rpc_task *task);
+int	xprt_request_enqueue_receive(struct rpc_task *task);
 void	xprt_request_wait_receive(struct rpc_task *task);
 void	xprt_request_dequeue_xprt(struct rpc_task *task);
 bool	xprt_request_need_retransmit(struct rpc_task *task);
@@ -1004,7 +1004,6 @@ DEFINE_RPC_XPRT_LIFETIME_EVENT(connect);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
-DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy);
 
 DECLARE_EVENT_CLASS(rpc_xprt_event,
@@ -1127,6 +1127,8 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
	struct rpc_task *task;
 
	task = rpc_new_task(task_setup_data);
+	if (IS_ERR(task))
+		return task;
 
	if (!RPC_IS_ASYNC(task))
		task->tk_flags |= RPC_TASK_CRED_NOREF;
@@ -1227,6 +1229,11 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
+	if (IS_ERR(task)) {
+		xprt_free_bc_request(req);
+		return task;
+	}
+
	xprt_init_bc_request(req, task);
 
	task->tk_action = call_bc_encode;
@@ -1858,6 +1865,9 @@ call_encode(struct rpc_task *task)
	xprt_request_dequeue_xprt(task);
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	rpc_xdr_encode(task);
+	/* Add task to reply queue before transmission to avoid races */
+	if (task->tk_status == 0 && rpc_reply_expected(task))
+		task->tk_status = xprt_request_enqueue_receive(task);
	/* Did the encode result in an error condition? */
	if (task->tk_status != 0) {
		/* Was the error nonfatal? */
@@ -1881,9 +1891,6 @@ call_encode(struct rpc_task *task)
		return;
	}
 
-	/* Add task to reply queue before transmission to avoid races */
-	if (rpc_reply_expected(task))
-		xprt_request_enqueue_receive(task);
	xprt_request_enqueue_transmit(task);
 out:
	task->tk_action = call_transmit;
@@ -2200,6 +2207,7 @@ call_transmit_status(struct rpc_task *task)
	 * socket just returned a connection error,
	 * then hold onto the transport lock.
	 */
+	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ>>2);
		fallthrough;
@@ -2283,6 +2291,7 @@ call_bc_transmit_status(struct rpc_task *task)
	case -ENOTCONN:
	case -EPIPE:
		break;
+	case -ENOMEM:
	case -ENOBUFS:
		rpc_delay(task, HZ>>2);
		fallthrough;
@@ -2365,6 +2374,11 @@ call_status(struct rpc_task *task)
	case -EPIPE:
	case -EAGAIN:
		break;
+	case -ENFILE:
+	case -ENOBUFS:
+	case -ENOMEM:
+		rpc_delay(task, HZ>>2);
+		break;
	case -EIO:
		/* shutdown or soft timeout */
		goto out_exit;
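The ENOMEM/ENOBUFS cases added above all follow the same pattern: rather than failing the RPC on a transient allocation error, the state machine sleeps for HZ>>2 jiffies (a quarter of a second) and retries. A rough userspace sketch of that retry-with-delay idea, with a hypothetical try_transmit() standing in for the real transmit path:

#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical transmit step that fails transiently with -ENOMEM at first. */
static int try_transmit(int attempt)
{
	return attempt < 3 ? -ENOMEM : 0;	/* succeeds on the fourth attempt */
}

int main(void)
{
	struct timespec quarter = { .tv_sec = 0, .tv_nsec = 250 * 1000 * 1000 };
	int attempt = 0, err;

	/* Mirrors the call_status()/call_transmit_status() idea: on a transient
	 * low-memory error, wait roughly 250 ms and try again. */
	while ((err = try_transmit(attempt++)) == -ENOMEM || err == -ENOBUFS) {
		fprintf(stderr, "transient error %d, retrying after 250 ms\n", err);
		nanosleep(&quarter, NULL);
	}
	printf("finished with status %d after %d attempts\n", err, attempt);
	return 0;
}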
@@ -1128,6 +1128,11 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
 
	if (task == NULL) {
		task = rpc_alloc_task();
+		if (task == NULL) {
+			rpc_release_calldata(setup_data->callback_ops,
+					setup_data->callback_data);
+			return ERR_PTR(-ENOMEM);
+		}
		flags = RPC_TASK_DYNAMIC;
	}
 
@@ -221,12 +221,6 @@ static int xprt_send_kvec(struct socket *sock, struct msghdr *msg,
 static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
			      struct xdr_buf *xdr, size_t base)
 {
-	int err;
-
-	err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
-	if (err < 0)
-		return err;
-
	iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),
		      xdr->page_len + xdr->page_base);
	return xprt_sendmsg(sock, msg, base + xdr->page_base);
@@ -579,15 +579,18 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
	if (svc_xprt_is_dead(xprt))
		goto out_notconn;
 
+	err = xdr_alloc_bvec(xdr, GFP_KERNEL);
+	if (err < 0)
+		goto out_unlock;
+
	err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
-	xdr_free_bvec(xdr);
	if (err == -ECONNREFUSED) {
		/* ICMP error on earlier request. */
		err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
-		xdr_free_bvec(xdr);
	}
+	xdr_free_bvec(xdr);
	trace_svcsock_udp_send(xprt, err);
-
+out_unlock:
	mutex_unlock(&xprt->xpt_mutex);
	if (err < 0)
		return err;
@@ -1096,7 +1099,9 @@ static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
	int ret;
 
	*sentp = 0;
-	xdr_alloc_bvec(xdr, GFP_KERNEL);
+	ret = xdr_alloc_bvec(xdr, GFP_KERNEL);
+	if (ret < 0)
+		return ret;
 
	ret = kernel_sendmsg(sock, &msg, &rm, 1, rm.iov_len);
	if (ret < 0)
@@ -73,6 +73,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net);
 static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
 static void	xprt_destroy(struct rpc_xprt *xprt);
 static void	xprt_request_init(struct rpc_task *task);
+static int	xprt_request_prepare(struct rpc_rqst *req);
 
 static DEFINE_SPINLOCK(xprt_list_lock);
 static LIST_HEAD(xprt_list);
@@ -929,12 +930,7 @@ void xprt_connect(struct rpc_task *task)
	if (!xprt_lock_write(xprt, task))
		return;
 
-	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
-		trace_xprt_disconnect_cleanup(xprt);
-		xprt->ops->close(xprt);
-	}
-
-	if (!xprt_connected(xprt)) {
+	if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));
@@ -1143,16 +1139,19 @@ xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
  * @task: RPC task
  *
  */
-void
+int
 xprt_request_enqueue_receive(struct rpc_task *task)
 {
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
+	int ret;
 
	if (!xprt_request_need_enqueue_receive(task, req))
-		return;
+		return 0;
 
-	xprt_request_prepare(task->tk_rqstp);
+	ret = xprt_request_prepare(task->tk_rqstp);
+	if (ret)
+		return ret;
	spin_lock(&xprt->queue_lock);
 
	/* Update the softirq receive buffer */
@@ -1166,6 +1165,7 @@ xprt_request_enqueue_receive(struct rpc_task *task)
 
	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
+	return 0;
 }
 
 /**
@@ -1452,14 +1452,16 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
  *
  * Calls into the transport layer to do whatever is needed to prepare
  * the request for transmission or receive.
+ * Returns error, or zero.
  */
-void
+static int
 xprt_request_prepare(struct rpc_rqst *req)
 {
	struct rpc_xprt *xprt = req->rq_xprt;
 
	if (xprt->ops->prepare_request)
-		xprt->ops->prepare_request(req);
+		return xprt->ops->prepare_request(req);
+	return 0;
 }
 
 /**
@@ -822,12 +822,17 @@ static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait)
	return ret;
 }
 
-static void
+static int
 xs_stream_prepare_request(struct rpc_rqst *req)
 {
+	gfp_t gfp = rpc_task_gfp_mask();
+	int ret;
+
+	ret = xdr_alloc_bvec(&req->rq_snd_buf, gfp);
+	if (ret < 0)
+		return ret;
	xdr_free_bvec(&req->rq_rcv_buf);
-	req->rq_task->tk_status = xdr_alloc_bvec(
-		&req->rq_rcv_buf, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+	return xdr_alloc_bvec(&req->rq_rcv_buf, gfp);
 }
 
 /*
@@ -879,7 +884,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
 
	/* Close the stream if the previous transmission was incomplete */
	if (xs_send_request_was_aborted(transport, req)) {
-		xs_close(xprt);
+		xprt_force_disconnect(xprt);
		return -ENOTCONN;
	}
 
@@ -915,7 +920,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
			-status);
		fallthrough;
	case -EPIPE:
-		xs_close(xprt);
+		xprt_force_disconnect(xprt);
		status = -ENOTCONN;
	}
 
@@ -956,6 +961,9 @@ static int xs_udp_send_request(struct rpc_rqst *req)
	if (!xprt_request_get_cong(xprt, req))
		return -EBADSLT;
 
+	status = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
+	if (status < 0)
+		return status;
	req->rq_xtime = ktime_get();
	status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);
 
@@ -1185,6 +1193,16 @@ static void xs_reset_transport(struct sock_xprt *transport)
 
	if (sk == NULL)
		return;
+	/*
+	 * Make sure we're calling this in a context from which it is safe
+	 * to call __fput_sync(). In practice that means rpciod and the
+	 * system workqueue.
+	 */
+	if (!(current->flags & PF_WQ_WORKER)) {
+		WARN_ON_ONCE(1);
+		set_bit(XPRT_CLOSE_WAIT, &xprt->state);
+		return;
+	}
 
	if (atomic_read(&transport->xprt.swapper))
		sk_clear_memalloc(sk);
@@ -1208,7 +1226,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
	mutex_unlock(&transport->recv_mutex);
 
	trace_rpc_socket_close(xprt, sock);
-	fput(filp);
+	__fput_sync(filp);
 
	xprt_disconnect_done(xprt);
 }
@@ -2544,6 +2562,9 @@ static int bc_sendto(struct rpc_rqst *req)
	int err;
 
	req->rq_xtime = ktime_get();
+	err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
+	if (err < 0)
+		return err;
	err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent);
	xdr_free_bvec(xdr);
	if (err < 0 || sent != (xdr->len + sizeof(marker)))