NFS Client Updates for Linux 5.15

Merge tag 'nfs-for-5.15-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:

 "New Features:
   - Better client responsiveness when server isn't replying
   - Use refcount_t in sunrpc rpc_client refcount tracking
   - Add srcaddr and dst_port to the sunrpc sysfs info files
   - Add basic support for connection sharing between servers with
     multiple NICs

  Bugfixes and Cleanups:
   - Sunrpc tracepoint cleanups
   - Disconnect after ib_post_send() errors to avoid deadlocks
   - Fix for tearing down rpcrdma_reps
   - Fix a potential pNFS layoutget livelock loop
   - pNFS layout barrier fixes
   - Fix a potential memory corruption in rpc_wake_up_queued_task_set_status()
   - Fix reconnection locking
   - Fix return value of get_srcport()
   - Remove rpcrdma_post_sends()
   - Remove pNFS dead code
   - Remove copy size restriction for inter-server copies
   - Overhaul the NFS callback service
   - Clean up sunrpc TCP socket shutdowns
   - Always provide aligned buffers to RPC read layers"

* tag 'nfs-for-5.15-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (39 commits)
  NFS: Always provide aligned buffers to the RPC read layers
  NFSv4.1 add network transport when session trunking is detected
  SUNRPC enforce creation of no more than max_connect xprts
  NFSv4 introduce max_connect mount options
  SUNRPC add xps_nunique_destaddr_xprts to xprt_switch_info in sysfs
  SUNRPC keep track of number of transports to unique addresses
  NFSv3: Delete duplicate judgement in nfs3_async_handle_jukebox
  SUNRPC: Tweak TCP socket shutdown in the RPC client
  SUNRPC: Simplify socket shutdown when not reusing TCP ports
  NFSv4.2: remove restriction of copy size for inter-server copy.
  NFS: Clean up the synopsis of callback process_op()
  NFS: Extract the xdr_init_encode/decode() calls from decode_compound
  NFS: Remove unused callback void decoder
  NFS: Add a private local dispatcher for NFSv4 callback operations
  SUNRPC: Eliminate the RQ_AUTHERR flag
  SUNRPC: Set rq_auth_stat in the pg_authenticate() callout
  SUNRPC: Add svc_rqst::rq_auth_stat
  SUNRPC: Add dst_port to the sysfs xprt info file
  SUNRPC: Add srcaddr as a file in sysfs
  sunrpc: Fix return value of get_srcport()
  ...
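The "Use refcount_t in sunrpc rpc_client refcount tracking" item converts rpc_clnt::cl_count from atomic_t to refcount_t (the clnt.h and clnt.c hunks appear further down). As a quick illustration of that pattern, here is a minimal, self-contained sketch on a hypothetical object; it is not the sunrpc code itself:

#include <linux/refcount.h>
#include <linux/slab.h>

/* Hypothetical object, used only to illustrate the refcount_t pattern
 * that the rpc_clnt conversion follows.
 */
struct demo_client {
	refcount_t count;
};

static struct demo_client *demo_client_alloc(void)
{
	struct demo_client *clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);

	if (clnt)
		refcount_set(&clnt->count, 1);	/* caller owns the first reference */
	return clnt;
}

static void demo_client_get(struct demo_client *clnt)
{
	refcount_inc(&clnt->count);	/* warns and saturates on overflow, unlike atomic_inc() */
}

static void demo_client_put(struct demo_client *clnt)
{
	if (refcount_dec_and_test(&clnt->count))
		kfree(clnt);
}

The gain over atomic_t is that over- and underflow are detected instead of silently wrapping, which is the point of the sunrpc conversion.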
commit 0961f0c00e
@@ -649,6 +649,7 @@ static int lockd_authenticate(struct svc_rqst *rqstp)
 	switch (rqstp->rq_authop->flavour) {
 	case RPC_AUTH_NULL:
 	case RPC_AUTH_UNIX:
+		rqstp->rq_auth_stat = rpc_auth_ok;
 		if (rqstp->rq_proc == 0)
 			return SVC_OK;
 		if (is_callback(rqstp->rq_proc)) {
@@ -659,6 +660,7 @@ static int lockd_authenticate(struct svc_rqst *rqstp)
 		}
 		return svc_set_client(rqstp);
 	}
+	rqstp->rq_auth_stat = rpc_autherr_badcred;
 	return SVC_DENIED;
 }
@@ -429,6 +429,8 @@ check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
  */
 static int nfs_callback_authenticate(struct svc_rqst *rqstp)
 {
+	rqstp->rq_auth_stat = rpc_autherr_badcred;
+
 	switch (rqstp->rq_authop->flavour) {
 	case RPC_AUTH_NULL:
 		if (rqstp->rq_proc != CB_NULL)
@@ -439,6 +441,8 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp)
 		if (svc_is_backchannel(rqstp))
 			return SVC_DENIED;
 	}
+
+	rqstp->rq_auth_stat = rpc_auth_ok;
 	return SVC_OK;
 }
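The two hunks above illustrate the new reporting convention in this series: an authentication callout records the RPC-level auth error in rqstp->rq_auth_stat instead of writing it through a separate __be32 *authp argument. A minimal sketch of a callout written against that convention follows; the function name is hypothetical and the error choices are only examples:

#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcauth.h>

static int demo_authenticate(struct svc_rqst *rqstp)
{
	switch (rqstp->rq_authop->flavour) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		/* success: record "ok" and accept the request */
		rqstp->rq_auth_stat = rpc_auth_ok;
		return SVC_OK;
	default:
		/* failure: the generic svc code picks the error up from rq_auth_stat */
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}
}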
@ -63,11 +63,10 @@ static __be32 nfs4_callback_null(struct svc_rqst *rqstp)
|
|||
return htonl(NFS4_OK);
|
||||
}
|
||||
|
||||
static int nfs4_decode_void(struct svc_rqst *rqstp, __be32 *p)
|
||||
{
|
||||
return xdr_argsize_check(rqstp, p);
|
||||
}
|
||||
|
||||
/*
|
||||
* svc_process_common() looks for an XDR encoder to know when
|
||||
* not to drop a Reply.
|
||||
*/
|
||||
static int nfs4_encode_void(struct svc_rqst *rqstp, __be32 *p)
|
||||
{
|
||||
return xdr_ressize_check(rqstp, p);
|
||||
|
@ -864,17 +863,16 @@ preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
|
|||
}
|
||||
|
||||
static __be32 process_op(int nop, struct svc_rqst *rqstp,
|
||||
struct xdr_stream *xdr_in, void *argp,
|
||||
struct xdr_stream *xdr_out, void *resp,
|
||||
struct cb_process_state *cps)
|
||||
struct cb_process_state *cps)
|
||||
{
|
||||
struct xdr_stream *xdr_out = &rqstp->rq_res_stream;
|
||||
struct callback_op *op = &callback_ops[0];
|
||||
unsigned int op_nr;
|
||||
__be32 status;
|
||||
long maxlen;
|
||||
__be32 res;
|
||||
|
||||
status = decode_op_hdr(xdr_in, &op_nr);
|
||||
status = decode_op_hdr(&rqstp->rq_arg_stream, &op_nr);
|
||||
if (unlikely(status))
|
||||
return status;
|
||||
|
||||
|
@ -904,9 +902,11 @@ static __be32 process_op(int nop, struct svc_rqst *rqstp,
|
|||
|
||||
maxlen = xdr_out->end - xdr_out->p;
|
||||
if (maxlen > 0 && maxlen < PAGE_SIZE) {
|
||||
status = op->decode_args(rqstp, xdr_in, argp);
|
||||
status = op->decode_args(rqstp, &rqstp->rq_arg_stream,
|
||||
rqstp->rq_argp);
|
||||
if (likely(status == 0))
|
||||
status = op->process_op(argp, resp, cps);
|
||||
status = op->process_op(rqstp->rq_argp, rqstp->rq_resp,
|
||||
cps);
|
||||
} else
|
||||
status = htonl(NFS4ERR_RESOURCE);
|
||||
|
||||
|
@ -915,7 +915,7 @@ static __be32 process_op(int nop, struct svc_rqst *rqstp,
|
|||
if (unlikely(res))
|
||||
return res;
|
||||
if (op->encode_res != NULL && status == 0)
|
||||
status = op->encode_res(rqstp, xdr_out, resp);
|
||||
status = op->encode_res(rqstp, xdr_out, rqstp->rq_resp);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@ -926,22 +926,15 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
|
|||
{
|
||||
struct cb_compound_hdr_arg hdr_arg = { 0 };
|
||||
struct cb_compound_hdr_res hdr_res = { NULL };
|
||||
struct xdr_stream xdr_in, xdr_out;
|
||||
__be32 *p, status;
|
||||
struct cb_process_state cps = {
|
||||
.drc_status = 0,
|
||||
.clp = NULL,
|
||||
.net = SVC_NET(rqstp),
|
||||
};
|
||||
unsigned int nops = 0;
|
||||
__be32 status;
|
||||
|
||||
xdr_init_decode(&xdr_in, &rqstp->rq_arg,
|
||||
rqstp->rq_arg.head[0].iov_base, NULL);
|
||||
|
||||
p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
|
||||
xdr_init_encode(&xdr_out, &rqstp->rq_res, p, NULL);
|
||||
|
||||
status = decode_compound_hdr_arg(&xdr_in, &hdr_arg);
|
||||
status = decode_compound_hdr_arg(&rqstp->rq_arg_stream, &hdr_arg);
|
||||
if (status == htonl(NFS4ERR_RESOURCE))
|
||||
return rpc_garbage_args;
|
||||
|
||||
|
@ -961,15 +954,13 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
|
|||
cps.minorversion = hdr_arg.minorversion;
|
||||
hdr_res.taglen = hdr_arg.taglen;
|
||||
hdr_res.tag = hdr_arg.tag;
|
||||
if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
|
||||
if (encode_compound_hdr_res(&rqstp->rq_res_stream, &hdr_res) != 0) {
|
||||
if (cps.clp)
|
||||
nfs_put_client(cps.clp);
|
||||
return rpc_system_err;
|
||||
}
|
||||
while (status == 0 && nops != hdr_arg.nops) {
|
||||
status = process_op(nops, rqstp, &xdr_in,
|
||||
rqstp->rq_argp, &xdr_out, rqstp->rq_resp,
|
||||
&cps);
|
||||
status = process_op(nops, rqstp, &cps);
|
||||
nops++;
|
||||
}
|
||||
|
||||
|
@ -988,7 +979,20 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
|
|||
|
||||
out_invalidcred:
|
||||
pr_warn_ratelimited("NFS: NFSv4 callback contains invalid cred\n");
|
||||
return svc_return_autherr(rqstp, rpc_autherr_badcred);
|
||||
rqstp->rq_auth_stat = rpc_autherr_badcred;
|
||||
return rpc_success;
|
||||
}
|
||||
|
||||
static int
|
||||
nfs_callback_dispatch(struct svc_rqst *rqstp, __be32 *statp)
|
||||
{
|
||||
const struct svc_procedure *procp = rqstp->rq_procinfo;
|
||||
|
||||
svcxdr_init_decode(rqstp);
|
||||
svcxdr_init_encode(rqstp);
|
||||
|
||||
*statp = procp->pc_func(rqstp);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1057,7 +1061,6 @@ static struct callback_op callback_ops[] = {
|
|||
static const struct svc_procedure nfs4_callback_procedures1[] = {
|
||||
[CB_NULL] = {
|
||||
.pc_func = nfs4_callback_null,
|
||||
.pc_decode = nfs4_decode_void,
|
||||
.pc_encode = nfs4_encode_void,
|
||||
.pc_xdrressize = 1,
|
||||
.pc_name = "NULL",
|
||||
|
@ -1079,7 +1082,7 @@ const struct svc_version nfs4_callback_version1 = {
|
|||
.vs_proc = nfs4_callback_procedures1,
|
||||
.vs_count = nfs4_callback_count1,
|
||||
.vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
|
||||
.vs_dispatch = NULL,
|
||||
.vs_dispatch = nfs_callback_dispatch,
|
||||
.vs_hidden = true,
|
||||
.vs_need_cong_ctrl = true,
|
||||
};
|
||||
|
@ -1091,7 +1094,7 @@ const struct svc_version nfs4_callback_version4 = {
|
|||
.vs_proc = nfs4_callback_procedures1,
|
||||
.vs_count = nfs4_callback_count4,
|
||||
.vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
|
||||
.vs_dispatch = NULL,
|
||||
.vs_dispatch = nfs_callback_dispatch,
|
||||
.vs_hidden = true,
|
||||
.vs_need_cong_ctrl = true,
|
||||
};
|
||||
|
|
|
@ -179,6 +179,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
|
|||
|
||||
clp->cl_proto = cl_init->proto;
|
||||
clp->cl_nconnect = cl_init->nconnect;
|
||||
clp->cl_max_connect = cl_init->max_connect ? cl_init->max_connect : 1;
|
||||
clp->cl_net = get_net(cl_init->net);
|
||||
|
||||
clp->cl_principal = "*";
|
||||
|
@ -540,6 +541,7 @@ int nfs_create_rpc_client(struct nfs_client *clp,
|
|||
|
||||
clnt->cl_principal = clp->cl_principal;
|
||||
clp->cl_rpcclient = clnt;
|
||||
clnt->cl_max_connect = clp->cl_max_connect;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nfs_create_rpc_client);
|
||||
|
|
|
@ -60,6 +60,7 @@ enum nfs_param {
|
|||
Opt_mountvers,
|
||||
Opt_namelen,
|
||||
Opt_nconnect,
|
||||
Opt_max_connect,
|
||||
Opt_port,
|
||||
Opt_posix,
|
||||
Opt_proto,
|
||||
|
@ -158,6 +159,7 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
|
|||
fsparam_u32 ("mountvers", Opt_mountvers),
|
||||
fsparam_u32 ("namlen", Opt_namelen),
|
||||
fsparam_u32 ("nconnect", Opt_nconnect),
|
||||
fsparam_u32 ("max_connect", Opt_max_connect),
|
||||
fsparam_string("nfsvers", Opt_vers),
|
||||
fsparam_u32 ("port", Opt_port),
|
||||
fsparam_flag_no("posix", Opt_posix),
|
||||
|
@ -770,6 +772,11 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
|
|||
goto out_of_bounds;
|
||||
ctx->nfs_server.nconnect = result.uint_32;
|
||||
break;
|
||||
case Opt_max_connect:
|
||||
if (result.uint_32 < 1 || result.uint_32 > NFS_MAX_TRANSPORTS)
|
||||
goto out_of_bounds;
|
||||
ctx->nfs_server.max_connect = result.uint_32;
|
||||
break;
|
||||
case Opt_lookupcache:
|
||||
switch (result.uint_32) {
|
||||
case Opt_lookupcache_all:
|
||||
|
|
|
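The hunks above wire a new "max_connect" option into the nfs mount option tables next to "nconnect". As a generic illustration of the fs_parameter API used here, a minimal sketch with a hypothetical option table and handler (the real nfs table, enum values and the NFS_MAX_TRANSPORTS bound live in the nfs fs_context code):

#include <linux/errno.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { Opt_demo_max_connect };

static const struct fs_parameter_spec demo_fs_parameters[] = {
	fsparam_u32("max_connect", Opt_demo_max_connect),
	{}
};

static int demo_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, demo_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_demo_max_connect:
		/* reject out-of-range values; 16 stands in for NFS_MAX_TRANSPORTS */
		if (result.uint_32 < 1 || result.uint_32 > 16)
			return -EINVAL;
		/* a real filesystem would stash result.uint_32 in its fs_context here */
		break;
	}
	return 0;
}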
@ -67,6 +67,7 @@ struct nfs_client_initdata {
|
|||
int proto;
|
||||
u32 minorversion;
|
||||
unsigned int nconnect;
|
||||
unsigned int max_connect;
|
||||
struct net *net;
|
||||
const struct rpc_timeout *timeparms;
|
||||
const struct cred *cred;
|
||||
|
@ -121,6 +122,7 @@ struct nfs_fs_context {
|
|||
int port;
|
||||
unsigned short protocol;
|
||||
unsigned short nconnect;
|
||||
unsigned short max_connect;
|
||||
unsigned short export_path_len;
|
||||
} nfs_server;
|
||||
|
||||
|
|
|
@@ -49,8 +49,7 @@ nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode)
 {
 	if (task->tk_status != -EJUKEBOX)
 		return 0;
-	if (task->tk_status == -EJUKEBOX)
-		nfs_inc_stats(inode, NFSIOS_DELAY);
+	nfs_inc_stats(inode, NFSIOS_DELAY);
 	task->tk_status = 0;
 	rpc_restart_call(task);
 	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
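The simplified function above is also a compact example of the usual sunrpc retry idiom: clear the error, restart the call state machine, and queue a delay before the retry runs. As a standalone sketch (hypothetical helper name, not in-tree code):

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

static void demo_retry_after_delay(struct rpc_task *task, unsigned long delay)
{
	task->tk_status = 0;		/* forget the transient error */
	rpc_restart_call(task);		/* re-run the call from the start */
	rpc_delay(task, delay);		/* but only after the delay expires */
}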
@ -402,6 +402,33 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
|
|||
return nfs4_init_callback(clp);
|
||||
}
|
||||
|
||||
static void nfs4_add_trunk(struct nfs_client *clp, struct nfs_client *old)
|
||||
{
|
||||
struct sockaddr_storage clp_addr, old_addr;
|
||||
struct sockaddr *clp_sap = (struct sockaddr *)&clp_addr;
|
||||
struct sockaddr *old_sap = (struct sockaddr *)&old_addr;
|
||||
size_t clp_salen;
|
||||
struct xprt_create xprt_args = {
|
||||
.ident = old->cl_proto,
|
||||
.net = old->cl_net,
|
||||
.servername = old->cl_hostname,
|
||||
};
|
||||
|
||||
if (clp->cl_proto != old->cl_proto)
|
||||
return;
|
||||
clp_salen = rpc_peeraddr(clp->cl_rpcclient, clp_sap, sizeof(clp_addr));
|
||||
rpc_peeraddr(old->cl_rpcclient, old_sap, sizeof(old_addr));
|
||||
|
||||
if (clp_addr.ss_family != old_addr.ss_family)
|
||||
return;
|
||||
|
||||
xprt_args.dstaddr = clp_sap;
|
||||
xprt_args.addrlen = clp_salen;
|
||||
|
||||
rpc_clnt_add_xprt(old->cl_rpcclient, &xprt_args,
|
||||
rpc_clnt_test_and_add_xprt, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
* nfs4_init_client - Initialise an NFS4 client record
|
||||
*
|
||||
|
@ -436,6 +463,8 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
|
|||
* won't try to use it.
|
||||
*/
|
||||
nfs_mark_client_ready(clp, -EPERM);
|
||||
if (old->cl_mvops->session_trunk)
|
||||
nfs4_add_trunk(clp, old);
|
||||
}
|
||||
clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags);
|
||||
nfs_put_client(clp);
|
||||
|
@ -865,6 +894,7 @@ static int nfs4_set_client(struct nfs_server *server,
|
|||
const char *ip_addr,
|
||||
int proto, const struct rpc_timeout *timeparms,
|
||||
u32 minorversion, unsigned int nconnect,
|
||||
unsigned int max_connect,
|
||||
struct net *net)
|
||||
{
|
||||
struct nfs_client_initdata cl_init = {
|
||||
|
@ -883,6 +913,8 @@ static int nfs4_set_client(struct nfs_server *server,
|
|||
|
||||
if (minorversion == 0)
|
||||
__set_bit(NFS_CS_REUSEPORT, &cl_init.init_flags);
|
||||
else
|
||||
cl_init.max_connect = max_connect;
|
||||
if (proto == XPRT_TRANSPORT_TCP)
|
||||
cl_init.nconnect = nconnect;
|
||||
|
||||
|
@ -952,8 +984,10 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
|
|||
return ERR_PTR(-EINVAL);
|
||||
cl_init.hostname = buf;
|
||||
|
||||
if (mds_clp->cl_nconnect > 1 && ds_proto == XPRT_TRANSPORT_TCP)
|
||||
if (mds_clp->cl_nconnect > 1 && ds_proto == XPRT_TRANSPORT_TCP) {
|
||||
cl_init.nconnect = mds_clp->cl_nconnect;
|
||||
cl_init.max_connect = NFS_MAX_TRANSPORTS;
|
||||
}
|
||||
|
||||
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
|
||||
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
|
||||
|
@ -1122,6 +1156,7 @@ static int nfs4_init_server(struct nfs_server *server, struct fs_context *fc)
|
|||
&timeparms,
|
||||
ctx->minorversion,
|
||||
ctx->nfs_server.nconnect,
|
||||
ctx->nfs_server.max_connect,
|
||||
fc->net_ns);
|
||||
if (error < 0)
|
||||
return error;
|
||||
|
@ -1211,6 +1246,7 @@ struct nfs_server *nfs4_create_referral_server(struct fs_context *fc)
|
|||
parent_server->client->cl_timeout,
|
||||
parent_client->cl_mvops->minor_version,
|
||||
parent_client->cl_nconnect,
|
||||
parent_client->cl_max_connect,
|
||||
parent_client->cl_net);
|
||||
if (!error)
|
||||
goto init_server;
|
||||
|
@ -1226,6 +1262,7 @@ struct nfs_server *nfs4_create_referral_server(struct fs_context *fc)
|
|||
parent_server->client->cl_timeout,
|
||||
parent_client->cl_mvops->minor_version,
|
||||
parent_client->cl_nconnect,
|
||||
parent_client->cl_max_connect,
|
||||
parent_client->cl_net);
|
||||
if (error < 0)
|
||||
goto error;
|
||||
|
@ -1323,7 +1360,7 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
|
|||
error = nfs4_set_client(server, hostname, sap, salen, buf,
|
||||
clp->cl_proto, clnt->cl_timeout,
|
||||
clp->cl_minorversion,
|
||||
clp->cl_nconnect, net);
|
||||
clp->cl_nconnect, clp->cl_max_connect, net);
|
||||
clear_bit(NFS_MIG_TSM_POSSIBLE, &server->mig_status);
|
||||
if (error != 0) {
|
||||
nfs_server_insert_lists(server);
|
||||
|
|
|
@ -158,13 +158,11 @@ static ssize_t __nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
|
|||
sync = true;
|
||||
retry:
|
||||
if (!nfs42_files_from_same_server(file_in, file_out)) {
|
||||
/* for inter copy, if copy size if smaller than 12 RPC
|
||||
* payloads, fallback to traditional copy. There are
|
||||
* 14 RPCs during an NFSv4.x mount between source/dest
|
||||
* servers.
|
||||
/*
|
||||
* for inter copy, if copy size is too small
|
||||
* then fallback to generic copy.
|
||||
*/
|
||||
if (sync ||
|
||||
count <= 14 * NFS_SERVER(file_inode(file_in))->rsize)
|
||||
if (sync)
|
||||
return -EOPNOTSUPP;
|
||||
cn_resp = kzalloc(sizeof(struct nfs42_copy_notify_res),
|
||||
GFP_NOFS);
|
||||
|
|
|
@@ -335,7 +335,7 @@ static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
 
 static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
 {
-	if (pnfs_seqid_is_newer(newseq, lo->plh_barrier))
+	if (pnfs_seqid_is_newer(newseq, lo->plh_barrier) || !lo->plh_barrier)
 		lo->plh_barrier = newseq;
 }
 
@@ -347,11 +347,15 @@ pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
 		iomode = IOMODE_ANY;
 	lo->plh_return_iomode = iomode;
 	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
-	if (seq != 0) {
-		WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
+	/*
+	 * We must set lo->plh_return_seq to avoid livelocks with
+	 * pnfs_layout_need_return()
+	 */
+	if (seq == 0)
+		seq = be32_to_cpu(lo->plh_stateid.seqid);
+	if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
 		lo->plh_return_seq = seq;
-		pnfs_barrier_update(lo, seq);
-	}
+	pnfs_barrier_update(lo, seq);
 }
 
 static void
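Both hunks above key off pnfs_seqid_is_newer() when deciding whether to move the layout barrier or the return sequence forward. A comparison of this kind is conventionally written as a signed wraparound test, sketched below; the in-tree helper may differ in detail:

#include <linux/types.h>

static bool demo_seqid_is_newer(u32 s1, u32 s2)
{
	/* serial-number arithmetic: true when s1 is ahead of s2, even across a u32 wrap */
	return (s32)(s1 - s2) > 0;
}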
@ -592,10 +596,6 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
|
|||
inode = lo->plh_inode;
|
||||
|
||||
if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
|
||||
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
|
||||
spin_unlock(&inode->i_lock);
|
||||
return;
|
||||
}
|
||||
pnfs_get_layout_hdr(lo);
|
||||
pnfs_layout_remove_lseg(lo, lseg);
|
||||
if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
|
||||
|
@ -1000,7 +1000,7 @@ pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
|
|||
{
|
||||
u32 seqid = be32_to_cpu(stateid->seqid);
|
||||
|
||||
return !pnfs_seqid_is_newer(seqid, lo->plh_barrier) && lo->plh_barrier;
|
||||
return lo->plh_barrier && pnfs_seqid_is_newer(lo->plh_barrier, seqid);
|
||||
}
|
||||
|
||||
/* lget is set to 1 if called from inside send_layoutget call chain */
|
||||
|
|
|
@@ -293,15 +293,19 @@ static int
 readpage_async_filler(void *data, struct page *page)
 {
 	struct nfs_readdesc *desc = data;
+	struct inode *inode = page_file_mapping(page)->host;
+	unsigned int rsize = NFS_SERVER(inode)->rsize;
 	struct nfs_page *new;
-	unsigned int len;
+	unsigned int len, aligned_len;
 	int error;
 
 	len = nfs_page_length(page);
 	if (len == 0)
 		return nfs_return_empty_page(page);
 
-	new = nfs_create_request(desc->ctx, page, 0, len);
+	aligned_len = min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE);
+
+	new = nfs_create_request(desc->ctx, page, 0, aligned_len);
 	if (IS_ERR(new))
 		goto out_error;
 
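The hunk above is the "Always provide aligned buffers to the RPC read layers" change: each readpage request is rounded up to the server's rsize (and capped at PAGE_SIZE) before the nfs_page is created. A worked example of the arithmetic, with illustrative values and assuming rsize is a power of two, as ALIGN() requires:

#include <linux/kernel.h>
#include <linux/mm.h>

/* len = 3000, rsize = 4096, PAGE_SIZE = 4096:
 *   ALIGN(3000, 4096) = 4096
 *   aligned_len       = min(4096, PAGE_SIZE) = 4096
 * so the read layer sees a full rsize-aligned request rather than a
 * 3000-byte tail.  (Values here are only an example.)
 */
static unsigned int demo_aligned_len(unsigned int len, unsigned int rsize)
{
	return min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE);
}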
@ -480,6 +480,8 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
|
|||
if (clp->cl_nconnect > 0)
|
||||
seq_printf(m, ",nconnect=%u", clp->cl_nconnect);
|
||||
if (version == 4) {
|
||||
if (clp->cl_max_connect > 1)
|
||||
seq_printf(m, ",max_connect=%u", clp->cl_max_connect);
|
||||
if (nfss->port != NFS_PORT)
|
||||
seq_printf(m, ",port=%u", nfss->port);
|
||||
} else
|
||||
|
|
|
@ -40,6 +40,11 @@
|
|||
|
||||
#include <linux/mempool.h>
|
||||
|
||||
/*
|
||||
* These are the default for number of transports to different server IPs
|
||||
*/
|
||||
#define NFS_MAX_TRANSPORTS 16
|
||||
|
||||
/*
|
||||
* These are the default flags for swap requests
|
||||
*/
|
||||
|
|
|
@ -62,6 +62,7 @@ struct nfs_client {
|
|||
|
||||
u32 cl_minorversion;/* NFSv4 minorversion */
|
||||
unsigned int cl_nconnect; /* Number of connections */
|
||||
unsigned int cl_max_connect; /* max number of xprts allowed */
|
||||
const char * cl_principal; /* used for machine cred */
|
||||
|
||||
#if IS_ENABLED(CONFIG_NFS_V4)
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include <linux/socket.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/refcount.h>
|
||||
|
||||
#include <linux/sunrpc/msg_prot.h>
|
||||
#include <linux/sunrpc/sched.h>
|
||||
|
@ -35,7 +36,7 @@ struct rpc_sysfs_client;
|
|||
* The high-level client handle
|
||||
*/
|
||||
struct rpc_clnt {
|
||||
atomic_t cl_count; /* Number of references */
|
||||
refcount_t cl_count; /* Number of references */
|
||||
unsigned int cl_clid; /* client id */
|
||||
struct list_head cl_clients; /* Global list of clients */
|
||||
struct list_head cl_tasks; /* List of tasks */
|
||||
|
@ -81,6 +82,7 @@ struct rpc_clnt {
|
|||
struct work_struct cl_work;
|
||||
};
|
||||
const struct cred *cl_cred;
|
||||
unsigned int cl_max_connect; /* max number of transports not to the same IP */
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -135,6 +137,7 @@ struct rpc_create_args {
|
|||
char *client_name;
|
||||
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
|
||||
const struct cred *cred;
|
||||
unsigned int max_connect;
|
||||
};
|
||||
|
||||
struct rpc_add_xprt_test {
|
||||
|
|
|
@ -277,13 +277,13 @@ struct svc_rqst {
|
|||
#define RQ_VICTIM (5) /* about to be shut down */
|
||||
#define RQ_BUSY (6) /* request is busy */
|
||||
#define RQ_DATA (7) /* request has data */
|
||||
#define RQ_AUTHERR (8) /* Request status is auth error */
|
||||
unsigned long rq_flags; /* flags field */
|
||||
ktime_t rq_qtime; /* enqueue time */
|
||||
|
||||
void * rq_argp; /* decoded arguments */
|
||||
void * rq_resp; /* xdr'd results */
|
||||
void * rq_auth_data; /* flavor-specific data */
|
||||
__be32 rq_auth_stat; /* authentication status */
|
||||
int rq_auth_slack; /* extra space xdr code
|
||||
* should leave in head
|
||||
* for krb5i, krb5p.
|
||||
|
@ -537,7 +537,6 @@ unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
|
|||
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
|
||||
struct kvec *first, void *p,
|
||||
size_t total);
|
||||
__be32 svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err);
|
||||
__be32 svc_generic_init_request(struct svc_rqst *rqstp,
|
||||
const struct svc_program *progp,
|
||||
struct svc_process_info *procinfo);
|
||||
|
|
|
@ -127,7 +127,7 @@ struct auth_ops {
|
|||
char * name;
|
||||
struct module *owner;
|
||||
int flavour;
|
||||
int (*accept)(struct svc_rqst *rq, __be32 *authp);
|
||||
int (*accept)(struct svc_rqst *rq);
|
||||
int (*release)(struct svc_rqst *rq);
|
||||
void (*domain_release)(struct auth_domain *);
|
||||
int (*set_client)(struct svc_rqst *rq);
|
||||
|
@ -149,7 +149,7 @@ struct auth_ops {
|
|||
|
||||
struct svc_xprt;
|
||||
|
||||
extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp);
|
||||
extern int svc_authenticate(struct svc_rqst *rqstp);
|
||||
extern int svc_authorise(struct svc_rqst *rqstp);
|
||||
extern int svc_set_client(struct svc_rqst *rqstp);
|
||||
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
|
||||
|
|
|
@ -431,6 +431,7 @@ void xprt_release_write(struct rpc_xprt *, struct rpc_task *);
|
|||
#define XPRT_CONGESTED (9)
|
||||
#define XPRT_CWND_WAIT (10)
|
||||
#define XPRT_WRITE_SPACE (11)
|
||||
#define XPRT_SND_IS_COOKIE (12)
|
||||
|
||||
static inline void xprt_set_connected(struct rpc_xprt *xprt)
|
||||
{
|
||||
|
|
|
@ -18,6 +18,7 @@ struct rpc_xprt_switch {
|
|||
unsigned int xps_id;
|
||||
unsigned int xps_nxprts;
|
||||
unsigned int xps_nactive;
|
||||
unsigned int xps_nunique_destaddr_xprts;
|
||||
atomic_long_t xps_queuelen;
|
||||
struct list_head xps_xprt_list;
|
||||
|
||||
|
|
|
@ -793,6 +793,39 @@ TRACE_EVENT(xprtrdma_post_send,
|
|||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(xprtrdma_post_send_err,
|
||||
TP_PROTO(
|
||||
const struct rpcrdma_xprt *r_xprt,
|
||||
const struct rpcrdma_req *req,
|
||||
int rc
|
||||
),
|
||||
|
||||
TP_ARGS(r_xprt, req, rc),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(unsigned int, task_id)
|
||||
__field(unsigned int, client_id)
|
||||
__field(int, rc)
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
const struct rpc_rqst *rqst = &req->rl_slot;
|
||||
const struct rpcrdma_ep *ep = r_xprt->rx_ep;
|
||||
|
||||
__entry->cq_id = ep ? ep->re_attr.recv_cq->res.id : 0;
|
||||
__entry->task_id = rqst->rq_task->tk_pid;
|
||||
__entry->client_id = rqst->rq_task->tk_client ?
|
||||
rqst->rq_task->tk_client->cl_clid : -1;
|
||||
__entry->rc = rc;
|
||||
),
|
||||
|
||||
TP_printk("task:%u@%u cq.id=%u rc=%d",
|
||||
__entry->task_id, __entry->client_id,
|
||||
__entry->cq_id, __entry->rc
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(xprtrdma_post_recv,
|
||||
TP_PROTO(
|
||||
const struct rpcrdma_rep *rep
|
||||
|
@ -818,16 +851,14 @@ TRACE_EVENT(xprtrdma_post_recv,
|
|||
TRACE_EVENT(xprtrdma_post_recvs,
|
||||
TP_PROTO(
|
||||
const struct rpcrdma_xprt *r_xprt,
|
||||
unsigned int count,
|
||||
int status
|
||||
unsigned int count
|
||||
),
|
||||
|
||||
TP_ARGS(r_xprt, count, status),
|
||||
TP_ARGS(r_xprt, count),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(unsigned int, count)
|
||||
__field(int, status)
|
||||
__field(int, posted)
|
||||
__string(addr, rpcrdma_addrstr(r_xprt))
|
||||
__string(port, rpcrdma_portstr(r_xprt))
|
||||
|
@ -838,15 +869,44 @@ TRACE_EVENT(xprtrdma_post_recvs,
|
|||
|
||||
__entry->cq_id = ep->re_attr.recv_cq->res.id;
|
||||
__entry->count = count;
|
||||
__entry->status = status;
|
||||
__entry->posted = ep->re_receive_count;
|
||||
__assign_str(addr, rpcrdma_addrstr(r_xprt));
|
||||
__assign_str(port, rpcrdma_portstr(r_xprt));
|
||||
),
|
||||
|
||||
TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active (rc %d)",
|
||||
TP_printk("peer=[%s]:%s cq.id=%d %u new recvs, %d active",
|
||||
__get_str(addr), __get_str(port), __entry->cq_id,
|
||||
__entry->count, __entry->posted, __entry->status
|
||||
__entry->count, __entry->posted
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(xprtrdma_post_recvs_err,
|
||||
TP_PROTO(
|
||||
const struct rpcrdma_xprt *r_xprt,
|
||||
int status
|
||||
),
|
||||
|
||||
TP_ARGS(r_xprt, status),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, cq_id)
|
||||
__field(int, status)
|
||||
__string(addr, rpcrdma_addrstr(r_xprt))
|
||||
__string(port, rpcrdma_portstr(r_xprt))
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
const struct rpcrdma_ep *ep = r_xprt->rx_ep;
|
||||
|
||||
__entry->cq_id = ep->re_attr.recv_cq->res.id;
|
||||
__entry->status = status;
|
||||
__assign_str(addr, rpcrdma_addrstr(r_xprt));
|
||||
__assign_str(port, rpcrdma_portstr(r_xprt));
|
||||
),
|
||||
|
||||
TP_printk("peer=[%s]:%s cq.id=%d rc=%d",
|
||||
__get_str(addr), __get_str(port), __entry->cq_id,
|
||||
__entry->status
|
||||
)
|
||||
);
|
||||
|
||||
|
|
|
@ -295,25 +295,11 @@ TRACE_EVENT(rpc_request,
|
|||
)
|
||||
);
|
||||
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_ASYNC);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_NULLCREDS);
|
||||
TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_NO_ROUND_ROBIN);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_SOFT);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_SENT);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_TIMEOUT);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_NOCONNECT);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_CRED_NOREF);
|
||||
|
||||
#define rpc_show_task_flags(flags) \
|
||||
__print_flags(flags, "|", \
|
||||
{ RPC_TASK_ASYNC, "ASYNC" }, \
|
||||
{ RPC_TASK_SWAPPER, "SWAPPER" }, \
|
||||
{ RPC_TASK_MOVEABLE, "MOVEABLE" }, \
|
||||
{ RPC_TASK_NULLCREDS, "NULLCREDS" }, \
|
||||
{ RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
|
||||
{ RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \
|
||||
|
@ -327,14 +313,6 @@ TRACE_DEFINE_ENUM(RPC_TASK_CRED_NOREF);
|
|||
{ RPC_TASK_NO_RETRANS_TIMEOUT, "NORTO" }, \
|
||||
{ RPC_TASK_CRED_NOREF, "CRED_NOREF" })
|
||||
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_RUNNING);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_QUEUED);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_ACTIVE);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_NEED_XMIT);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_NEED_RECV);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_MSG_PIN_WAIT);
|
||||
TRACE_DEFINE_ENUM(RPC_TASK_SIGNALLED);
|
||||
|
||||
#define rpc_show_runstate(flags) \
|
||||
__print_flags(flags, "|", \
|
||||
{ (1UL << RPC_TASK_RUNNING), "RUNNING" }, \
|
||||
|
@ -945,17 +923,6 @@ TRACE_EVENT(rpc_socket_nospace,
|
|||
)
|
||||
);
|
||||
|
||||
TRACE_DEFINE_ENUM(XPRT_LOCKED);
|
||||
TRACE_DEFINE_ENUM(XPRT_CONNECTED);
|
||||
TRACE_DEFINE_ENUM(XPRT_CONNECTING);
|
||||
TRACE_DEFINE_ENUM(XPRT_CLOSE_WAIT);
|
||||
TRACE_DEFINE_ENUM(XPRT_BOUND);
|
||||
TRACE_DEFINE_ENUM(XPRT_BINDING);
|
||||
TRACE_DEFINE_ENUM(XPRT_CLOSING);
|
||||
TRACE_DEFINE_ENUM(XPRT_CONGESTED);
|
||||
TRACE_DEFINE_ENUM(XPRT_CWND_WAIT);
|
||||
TRACE_DEFINE_ENUM(XPRT_WRITE_SPACE);
|
||||
|
||||
#define rpc_show_xprt_state(x) \
|
||||
__print_flags(x, "|", \
|
||||
{ (1UL << XPRT_LOCKED), "LOCKED"}, \
|
||||
|
@ -965,6 +932,8 @@ TRACE_DEFINE_ENUM(XPRT_WRITE_SPACE);
|
|||
{ (1UL << XPRT_BOUND), "BOUND"}, \
|
||||
{ (1UL << XPRT_BINDING), "BINDING"}, \
|
||||
{ (1UL << XPRT_CLOSING), "CLOSING"}, \
|
||||
{ (1UL << XPRT_OFFLINE), "OFFLINE"}, \
|
||||
{ (1UL << XPRT_REMOVE), "REMOVE"}, \
|
||||
{ (1UL << XPRT_CONGESTED), "CONGESTED"}, \
|
||||
{ (1UL << XPRT_CWND_WAIT), "CWND_WAIT"}, \
|
||||
{ (1UL << XPRT_WRITE_SPACE), "WRITE_SPACE"})
|
||||
|
@ -1092,10 +1061,10 @@ TRACE_EVENT(xprt_retransmit,
|
|||
__field(u32, xid)
|
||||
__field(int, ntrans)
|
||||
__field(int, version)
|
||||
__field(unsigned long, timeout)
|
||||
__string(progname,
|
||||
rqst->rq_task->tk_client->cl_program->name)
|
||||
__string(procedure,
|
||||
rqst->rq_task->tk_msg.rpc_proc->p_name)
|
||||
__string(procname, rpc_proc_name(rqst->rq_task))
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
|
@ -1106,17 +1075,19 @@ TRACE_EVENT(xprt_retransmit,
|
|||
task->tk_client->cl_clid : -1;
|
||||
__entry->xid = be32_to_cpu(rqst->rq_xid);
|
||||
__entry->ntrans = rqst->rq_ntrans;
|
||||
__entry->timeout = task->tk_timeout;
|
||||
__assign_str(progname,
|
||||
task->tk_client->cl_program->name);
|
||||
__entry->version = task->tk_client->cl_vers;
|
||||
__assign_str(procedure, task->tk_msg.rpc_proc->p_name);
|
||||
__assign_str(procname, rpc_proc_name(task));
|
||||
),
|
||||
|
||||
TP_printk(
|
||||
"task:%u@%u xid=0x%08x %sv%d %s ntrans=%d",
|
||||
"task:%u@%u xid=0x%08x %sv%d %s ntrans=%d timeout=%lu",
|
||||
__entry->task_id, __entry->client_id, __entry->xid,
|
||||
__get_str(progname), __entry->version, __get_str(procedure),
|
||||
__entry->ntrans)
|
||||
__get_str(progname), __entry->version, __get_str(procname),
|
||||
__entry->ntrans, __entry->timeout
|
||||
)
|
||||
);
|
||||
|
||||
TRACE_EVENT(xprt_ping,
|
||||
|
@ -1568,8 +1539,7 @@ DEFINE_SVCXDRBUF_EVENT(sendto);
|
|||
svc_rqst_flag(SPLICE_OK) \
|
||||
svc_rqst_flag(VICTIM) \
|
||||
svc_rqst_flag(BUSY) \
|
||||
svc_rqst_flag(DATA) \
|
||||
svc_rqst_flag_end(AUTHERR)
|
||||
svc_rqst_flag_end(DATA)
|
||||
|
||||
#undef svc_rqst_flag
|
||||
#undef svc_rqst_flag_end
|
||||
|
@ -1611,9 +1581,9 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE);
|
|||
{ SVC_COMPLETE, "SVC_COMPLETE" })
|
||||
|
||||
TRACE_EVENT(svc_authenticate,
|
||||
TP_PROTO(const struct svc_rqst *rqst, int auth_res, __be32 auth_stat),
|
||||
TP_PROTO(const struct svc_rqst *rqst, int auth_res),
|
||||
|
||||
TP_ARGS(rqst, auth_res, auth_stat),
|
||||
TP_ARGS(rqst, auth_res),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(u32, xid)
|
||||
|
@ -1624,7 +1594,7 @@ TRACE_EVENT(svc_authenticate,
|
|||
TP_fast_assign(
|
||||
__entry->xid = be32_to_cpu(rqst->rq_xid);
|
||||
__entry->svc_status = auth_res;
|
||||
__entry->auth_stat = be32_to_cpu(auth_stat);
|
||||
__entry->auth_stat = be32_to_cpu(rqst->rq_auth_stat);
|
||||
),
|
||||
|
||||
TP_printk("xid=0x%08x auth_res=%s auth_stat=%s",
|
||||
|
|
|
@ -160,7 +160,7 @@ static struct rpc_clnt *get_gssp_clnt(struct sunrpc_net *sn)
|
|||
mutex_lock(&sn->gssp_lock);
|
||||
clnt = sn->gssp_clnt;
|
||||
if (clnt)
|
||||
atomic_inc(&clnt->cl_count);
|
||||
refcount_inc(&clnt->cl_count);
|
||||
mutex_unlock(&sn->gssp_lock);
|
||||
return clnt;
|
||||
}
|
||||
|
|
|
@ -707,11 +707,11 @@ svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
|
|||
/*
|
||||
* Verify the checksum on the header and return SVC_OK on success.
|
||||
* Otherwise, return SVC_DROP (in the case of a bad sequence number)
|
||||
* or return SVC_DENIED and indicate error in authp.
|
||||
* or return SVC_DENIED and indicate error in rqstp->rq_auth_stat.
|
||||
*/
|
||||
static int
|
||||
gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
|
||||
__be32 *rpcstart, struct rpc_gss_wire_cred *gc, __be32 *authp)
|
||||
__be32 *rpcstart, struct rpc_gss_wire_cred *gc)
|
||||
{
|
||||
struct gss_ctx *ctx_id = rsci->mechctx;
|
||||
struct xdr_buf rpchdr;
|
||||
|
@ -725,7 +725,7 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
|
|||
iov.iov_len = (u8 *)argv->iov_base - (u8 *)rpcstart;
|
||||
xdr_buf_from_iov(&iov, &rpchdr);
|
||||
|
||||
*authp = rpc_autherr_badverf;
|
||||
rqstp->rq_auth_stat = rpc_autherr_badverf;
|
||||
if (argv->iov_len < 4)
|
||||
return SVC_DENIED;
|
||||
flavor = svc_getnl(argv);
|
||||
|
@ -737,13 +737,13 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
|
|||
if (rqstp->rq_deferred) /* skip verification of revisited request */
|
||||
return SVC_OK;
|
||||
if (gss_verify_mic(ctx_id, &rpchdr, &checksum) != GSS_S_COMPLETE) {
|
||||
*authp = rpcsec_gsserr_credproblem;
|
||||
rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
|
||||
return SVC_DENIED;
|
||||
}
|
||||
|
||||
if (gc->gc_seq > MAXSEQ) {
|
||||
trace_rpcgss_svc_seqno_large(rqstp, gc->gc_seq);
|
||||
*authp = rpcsec_gsserr_ctxproblem;
|
||||
rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
|
||||
return SVC_DENIED;
|
||||
}
|
||||
if (!gss_check_seq_num(rqstp, rsci, gc->gc_seq))
|
||||
|
@ -1038,6 +1038,8 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
|
|||
struct rpc_gss_wire_cred *gc = &svcdata->clcred;
|
||||
int stat;
|
||||
|
||||
rqstp->rq_auth_stat = rpc_autherr_badcred;
|
||||
|
||||
/*
|
||||
* A gss export can be specified either by:
|
||||
* export *(sec=krb5,rw)
|
||||
|
@ -1053,6 +1055,8 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
|
|||
stat = svcauth_unix_set_client(rqstp);
|
||||
if (stat == SVC_DROP || stat == SVC_CLOSE)
|
||||
return stat;
|
||||
|
||||
rqstp->rq_auth_stat = rpc_auth_ok;
|
||||
return SVC_OK;
|
||||
}
|
||||
|
||||
|
@ -1142,7 +1146,7 @@ static void gss_free_in_token_pages(struct gssp_in_token *in_token)
|
|||
}
|
||||
|
||||
static int gss_read_proxy_verf(struct svc_rqst *rqstp,
|
||||
struct rpc_gss_wire_cred *gc, __be32 *authp,
|
||||
struct rpc_gss_wire_cred *gc,
|
||||
struct xdr_netobj *in_handle,
|
||||
struct gssp_in_token *in_token)
|
||||
{
|
||||
|
@ -1151,7 +1155,7 @@ static int gss_read_proxy_verf(struct svc_rqst *rqstp,
|
|||
int pages, i, res, pgto, pgfrom;
|
||||
size_t inlen, to_offs, from_offs;
|
||||
|
||||
res = gss_read_common_verf(gc, argv, authp, in_handle);
|
||||
res = gss_read_common_verf(gc, argv, &rqstp->rq_auth_stat, in_handle);
|
||||
if (res)
|
||||
return res;
|
||||
|
||||
|
@ -1227,7 +1231,7 @@ gss_write_resv(struct kvec *resv, size_t size_limit,
|
|||
* Otherwise, drop the request pending an answer to the upcall.
|
||||
*/
|
||||
static int svcauth_gss_legacy_init(struct svc_rqst *rqstp,
|
||||
struct rpc_gss_wire_cred *gc, __be32 *authp)
|
||||
struct rpc_gss_wire_cred *gc)
|
||||
{
|
||||
struct kvec *argv = &rqstp->rq_arg.head[0];
|
||||
struct kvec *resv = &rqstp->rq_res.head[0];
|
||||
|
@ -1236,7 +1240,7 @@ static int svcauth_gss_legacy_init(struct svc_rqst *rqstp,
|
|||
struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
|
||||
|
||||
memset(&rsikey, 0, sizeof(rsikey));
|
||||
ret = gss_read_verf(gc, argv, authp,
|
||||
ret = gss_read_verf(gc, argv, &rqstp->rq_auth_stat,
|
||||
&rsikey.in_handle, &rsikey.in_token);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -1339,7 +1343,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
|
|||
}
|
||||
|
||||
static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
|
||||
struct rpc_gss_wire_cred *gc, __be32 *authp)
|
||||
struct rpc_gss_wire_cred *gc)
|
||||
{
|
||||
struct kvec *resv = &rqstp->rq_res.head[0];
|
||||
struct xdr_netobj cli_handle;
|
||||
|
@ -1351,8 +1355,7 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
|
|||
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
|
||||
|
||||
memset(&ud, 0, sizeof(ud));
|
||||
ret = gss_read_proxy_verf(rqstp, gc, authp,
|
||||
&ud.in_handle, &ud.in_token);
|
||||
ret = gss_read_proxy_verf(rqstp, gc, &ud.in_handle, &ud.in_token);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -1525,7 +1528,7 @@ static void destroy_use_gss_proxy_proc_entry(struct net *net) {}
|
|||
* response here and return SVC_COMPLETE.
|
||||
*/
|
||||
static int
|
||||
svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
|
||||
svcauth_gss_accept(struct svc_rqst *rqstp)
|
||||
{
|
||||
struct kvec *argv = &rqstp->rq_arg.head[0];
|
||||
struct kvec *resv = &rqstp->rq_res.head[0];
|
||||
|
@ -1538,7 +1541,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
|
|||
int ret;
|
||||
struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
|
||||
|
||||
*authp = rpc_autherr_badcred;
|
||||
rqstp->rq_auth_stat = rpc_autherr_badcred;
|
||||
if (!svcdata)
|
||||
svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
|
||||
if (!svcdata)
|
||||
|
@ -1575,22 +1578,22 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
|
|||
if ((gc->gc_proc != RPC_GSS_PROC_DATA) && (rqstp->rq_proc != 0))
|
||||
goto auth_err;
|
||||
|
||||
*authp = rpc_autherr_badverf;
|
||||
rqstp->rq_auth_stat = rpc_autherr_badverf;
|
||||
switch (gc->gc_proc) {
|
||||
case RPC_GSS_PROC_INIT:
|
||||
case RPC_GSS_PROC_CONTINUE_INIT:
|
||||
if (use_gss_proxy(SVC_NET(rqstp)))
|
||||
return svcauth_gss_proxy_init(rqstp, gc, authp);
|
||||
return svcauth_gss_proxy_init(rqstp, gc);
|
||||
else
|
||||
return svcauth_gss_legacy_init(rqstp, gc, authp);
|
||||
return svcauth_gss_legacy_init(rqstp, gc);
|
||||
case RPC_GSS_PROC_DATA:
|
||||
case RPC_GSS_PROC_DESTROY:
|
||||
/* Look up the context, and check the verifier: */
|
||||
*authp = rpcsec_gsserr_credproblem;
|
||||
rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
|
||||
rsci = gss_svc_searchbyctx(sn->rsc_cache, &gc->gc_ctx);
|
||||
if (!rsci)
|
||||
goto auth_err;
|
||||
switch (gss_verify_header(rqstp, rsci, rpcstart, gc, authp)) {
|
||||
switch (gss_verify_header(rqstp, rsci, rpcstart, gc)) {
|
||||
case SVC_OK:
|
||||
break;
|
||||
case SVC_DENIED:
|
||||
|
@ -1600,7 +1603,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
|
|||
}
|
||||
break;
|
||||
default:
|
||||
*authp = rpc_autherr_rejectedcred;
|
||||
rqstp->rq_auth_stat = rpc_autherr_rejectedcred;
|
||||
goto auth_err;
|
||||
}
|
||||
|
||||
|
@ -1616,13 +1619,13 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
|
|||
svc_putnl(resv, RPC_SUCCESS);
|
||||
goto complete;
|
||||
case RPC_GSS_PROC_DATA:
|
||||
*authp = rpcsec_gsserr_ctxproblem;
|
||||
rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
|
||||
svcdata->verf_start = resv->iov_base + resv->iov_len;
|
||||
if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
|
||||
goto auth_err;
|
||||
rqstp->rq_cred = rsci->cred;
|
||||
get_group_info(rsci->cred.cr_group_info);
|
||||
*authp = rpc_autherr_badcred;
|
||||
rqstp->rq_auth_stat = rpc_autherr_badcred;
|
||||
switch (gc->gc_svc) {
|
||||
case RPC_GSS_SVC_NONE:
|
||||
break;
|
||||
|
|
|
@ -167,7 +167,7 @@ static int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event)
|
|||
case RPC_PIPEFS_MOUNT:
|
||||
if (clnt->cl_pipedir_objects.pdh_dentry != NULL)
|
||||
return 1;
|
||||
if (atomic_read(&clnt->cl_count) == 0)
|
||||
if (refcount_read(&clnt->cl_count) == 0)
|
||||
return 1;
|
||||
break;
|
||||
case RPC_PIPEFS_UMOUNT:
|
||||
|
@ -419,7 +419,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
|
|||
clnt->cl_rtt = &clnt->cl_rtt_default;
|
||||
rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
|
||||
|
||||
atomic_set(&clnt->cl_count, 1);
|
||||
refcount_set(&clnt->cl_count, 1);
|
||||
|
||||
if (nodename == NULL)
|
||||
nodename = utsname()->nodename;
|
||||
|
@ -431,7 +431,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
|
|||
if (err)
|
||||
goto out_no_path;
|
||||
if (parent)
|
||||
atomic_inc(&parent->cl_count);
|
||||
refcount_inc(&parent->cl_count);
|
||||
|
||||
trace_rpc_clnt_new(clnt, xprt, program->name, args->servername);
|
||||
return clnt;
|
||||
|
@ -918,18 +918,16 @@ rpc_free_client(struct rpc_clnt *clnt)
|
|||
static struct rpc_clnt *
|
||||
rpc_free_auth(struct rpc_clnt *clnt)
|
||||
{
|
||||
if (clnt->cl_auth == NULL)
|
||||
return rpc_free_client(clnt);
|
||||
|
||||
/*
|
||||
* Note: RPCSEC_GSS may need to send NULL RPC calls in order to
|
||||
* release remaining GSS contexts. This mechanism ensures
|
||||
* that it can do so safely.
|
||||
*/
|
||||
atomic_inc(&clnt->cl_count);
|
||||
rpcauth_release(clnt->cl_auth);
|
||||
clnt->cl_auth = NULL;
|
||||
if (atomic_dec_and_test(&clnt->cl_count))
|
||||
if (clnt->cl_auth != NULL) {
|
||||
rpcauth_release(clnt->cl_auth);
|
||||
clnt->cl_auth = NULL;
|
||||
}
|
||||
if (refcount_dec_and_test(&clnt->cl_count))
|
||||
return rpc_free_client(clnt);
|
||||
return NULL;
|
||||
}
|
||||
|
@ -943,7 +941,7 @@ rpc_release_client(struct rpc_clnt *clnt)
|
|||
do {
|
||||
if (list_empty(&clnt->cl_tasks))
|
||||
wake_up(&destroy_wait);
|
||||
if (!atomic_dec_and_test(&clnt->cl_count))
|
||||
if (refcount_dec_not_one(&clnt->cl_count))
|
||||
break;
|
||||
clnt = rpc_free_auth(clnt);
|
||||
} while (clnt != NULL);
|
||||
|
@ -1082,7 +1080,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
|
|||
if (clnt != NULL) {
|
||||
rpc_task_set_transport(task, clnt);
|
||||
task->tk_client = clnt;
|
||||
atomic_inc(&clnt->cl_count);
|
||||
refcount_inc(&clnt->cl_count);
|
||||
if (clnt->cl_softrtry)
|
||||
task->tk_flags |= RPC_TASK_SOFT;
|
||||
if (clnt->cl_softerr)
|
||||
|
@ -2694,17 +2692,18 @@ static const struct rpc_procinfo rpcproc_null = {
|
|||
.p_decode = rpcproc_decode_null,
|
||||
};
|
||||
|
||||
static int rpc_ping(struct rpc_clnt *clnt)
|
||||
static void
|
||||
rpc_null_call_prepare(struct rpc_task *task, void *data)
|
||||
{
|
||||
struct rpc_message msg = {
|
||||
.rpc_proc = &rpcproc_null,
|
||||
};
|
||||
int err;
|
||||
err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
|
||||
RPC_TASK_NULLCREDS);
|
||||
return err;
|
||||
task->tk_flags &= ~RPC_TASK_NO_RETRANS_TIMEOUT;
|
||||
rpc_call_start(task);
|
||||
}
|
||||
|
||||
static const struct rpc_call_ops rpc_null_ops = {
|
||||
.rpc_call_prepare = rpc_null_call_prepare,
|
||||
.rpc_call_done = rpc_default_callback,
|
||||
};
|
||||
|
||||
static
|
||||
struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
|
||||
struct rpc_xprt *xprt, struct rpc_cred *cred, int flags,
|
||||
|
@ -2718,7 +2717,7 @@ struct rpc_task *rpc_call_null_helper(struct rpc_clnt *clnt,
|
|||
.rpc_xprt = xprt,
|
||||
.rpc_message = &msg,
|
||||
.rpc_op_cred = cred,
|
||||
.callback_ops = (ops != NULL) ? ops : &rpc_default_ops,
|
||||
.callback_ops = ops ?: &rpc_null_ops,
|
||||
.callback_data = data,
|
||||
.flags = flags | RPC_TASK_SOFT | RPC_TASK_SOFTCONN |
|
||||
RPC_TASK_NULLCREDS,
|
||||
|
@ -2733,6 +2732,19 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int
|
|||
}
|
||||
EXPORT_SYMBOL_GPL(rpc_call_null);
|
||||
|
||||
static int rpc_ping(struct rpc_clnt *clnt)
|
||||
{
|
||||
struct rpc_task *task;
|
||||
int status;
|
||||
|
||||
task = rpc_call_null_helper(clnt, NULL, NULL, 0, NULL, NULL);
|
||||
if (IS_ERR(task))
|
||||
return PTR_ERR(task);
|
||||
status = task->tk_status;
|
||||
rpc_put_task(task);
|
||||
return status;
|
||||
}
|
||||
|
||||
struct rpc_cb_add_xprt_calldata {
|
||||
struct rpc_xprt_switch *xps;
|
||||
struct rpc_xprt *xprt;
|
||||
|
@ -2756,6 +2768,7 @@ static void rpc_cb_add_xprt_release(void *calldata)
|
|||
}
|
||||
|
||||
static const struct rpc_call_ops rpc_cb_add_xprt_call_ops = {
|
||||
.rpc_call_prepare = rpc_null_call_prepare,
|
||||
.rpc_call_done = rpc_cb_add_xprt_done,
|
||||
.rpc_release = rpc_cb_add_xprt_release,
|
||||
};
|
||||
|
@ -2774,6 +2787,15 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
|
|||
struct rpc_cb_add_xprt_calldata *data;
|
||||
struct rpc_task *task;
|
||||
|
||||
if (xps->xps_nunique_destaddr_xprts + 1 > clnt->cl_max_connect) {
|
||||
rcu_read_lock();
|
||||
pr_warn("SUNRPC: reached max allowed number (%d) did not add "
|
||||
"transport to server: %s\n", clnt->cl_max_connect,
|
||||
rpc_peeraddr2str(clnt, RPC_DISPLAY_ADDR));
|
||||
rcu_read_unlock();
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
data = kmalloc(sizeof(*data), GFP_NOFS);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
@ -2786,7 +2808,7 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
|
|||
|
||||
task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
|
||||
&rpc_cb_add_xprt_call_ops, data);
|
||||
|
||||
data->xps->xps_nunique_destaddr_xprts++;
|
||||
rpc_put_task(task);
|
||||
success:
|
||||
return 1;
|
||||
|
|
|
@ -90,7 +90,7 @@ static int tasks_open(struct inode *inode, struct file *filp)
|
|||
struct seq_file *seq = filp->private_data;
|
||||
struct rpc_clnt *clnt = seq->private = inode->i_private;
|
||||
|
||||
if (!atomic_inc_not_zero(&clnt->cl_count)) {
|
||||
if (!refcount_inc_not_zero(&clnt->cl_count)) {
|
||||
seq_release(inode, filp);
|
||||
ret = -EINVAL;
|
||||
}
|
||||
|
|
|
@ -423,7 +423,7 @@ rpc_info_open(struct inode *inode, struct file *file)
|
|||
spin_lock(&file->f_path.dentry->d_lock);
|
||||
if (!d_unhashed(file->f_path.dentry))
|
||||
clnt = RPC_I(inode)->private;
|
||||
if (clnt != NULL && atomic_inc_not_zero(&clnt->cl_count)) {
|
||||
if (clnt != NULL && refcount_inc_not_zero(&clnt->cl_count)) {
|
||||
spin_unlock(&file->f_path.dentry->d_lock);
|
||||
m->private = clnt;
|
||||
} else {
|
||||
|
|
|
@ -1186,22 +1186,6 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
|
|||
static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
|
||||
#endif
|
||||
|
||||
__be32
|
||||
svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err)
|
||||
{
|
||||
set_bit(RQ_AUTHERR, &rqstp->rq_flags);
|
||||
return auth_err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(svc_return_autherr);
|
||||
|
||||
static __be32
|
||||
svc_get_autherr(struct svc_rqst *rqstp, __be32 *statp)
|
||||
{
|
||||
if (test_and_clear_bit(RQ_AUTHERR, &rqstp->rq_flags))
|
||||
return *statp;
|
||||
return rpc_auth_ok;
|
||||
}
|
||||
|
||||
static int
|
||||
svc_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp)
|
||||
{
|
||||
|
@ -1225,7 +1209,7 @@ svc_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp)
|
|||
test_bit(RQ_DROPME, &rqstp->rq_flags))
|
||||
return 0;
|
||||
|
||||
if (test_bit(RQ_AUTHERR, &rqstp->rq_flags))
|
||||
if (rqstp->rq_auth_stat != rpc_auth_ok)
|
||||
return 1;
|
||||
|
||||
if (*statp != rpc_success)
|
||||
|
@ -1306,7 +1290,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
|
|||
struct svc_process_info process;
|
||||
__be32 *statp;
|
||||
u32 prog, vers;
|
||||
__be32 auth_stat, rpc_stat;
|
||||
__be32 rpc_stat;
|
||||
int auth_res;
|
||||
__be32 *reply_statp;
|
||||
|
||||
|
@ -1349,14 +1333,12 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
|
|||
* We do this before anything else in order to get a decent
|
||||
* auth verifier.
|
||||
*/
|
||||
auth_res = svc_authenticate(rqstp, &auth_stat);
|
||||
auth_res = svc_authenticate(rqstp);
|
||||
/* Also give the program a chance to reject this call: */
|
||||
if (auth_res == SVC_OK && progp) {
|
||||
auth_stat = rpc_autherr_badcred;
|
||||
if (auth_res == SVC_OK && progp)
|
||||
auth_res = progp->pg_authenticate(rqstp);
|
||||
}
|
||||
if (auth_res != SVC_OK)
|
||||
trace_svc_authenticate(rqstp, auth_res, auth_stat);
|
||||
trace_svc_authenticate(rqstp, auth_res);
|
||||
switch (auth_res) {
|
||||
case SVC_OK:
|
||||
break;
|
||||
|
@ -1415,15 +1397,15 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
|
|||
goto release_dropit;
|
||||
if (*statp == rpc_garbage_args)
|
||||
goto err_garbage;
|
||||
auth_stat = svc_get_autherr(rqstp, statp);
|
||||
if (auth_stat != rpc_auth_ok)
|
||||
goto err_release_bad_auth;
|
||||
} else {
|
||||
dprintk("svc: calling dispatcher\n");
|
||||
if (!process.dispatch(rqstp, statp))
|
||||
goto release_dropit; /* Release reply info */
|
||||
}
|
||||
|
||||
if (rqstp->rq_auth_stat != rpc_auth_ok)
|
||||
goto err_release_bad_auth;
|
||||
|
||||
/* Check RPC status result */
|
||||
if (*statp != rpc_success)
|
||||
resv->iov_len = ((void*)statp) - resv->iov_base + 4;
|
||||
|
@ -1473,13 +1455,14 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
|
|||
if (procp->pc_release)
|
||||
procp->pc_release(rqstp);
|
||||
err_bad_auth:
|
||||
dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
|
||||
dprintk("svc: authentication failed (%d)\n",
|
||||
be32_to_cpu(rqstp->rq_auth_stat));
|
||||
serv->sv_stats->rpcbadauth++;
|
||||
/* Restore write pointer to location of accept status: */
|
||||
xdr_ressize_check(rqstp, reply_statp);
|
||||
svc_putnl(resv, 1); /* REJECT */
|
||||
svc_putnl(resv, 1); /* AUTH_ERROR */
|
||||
svc_putnl(resv, ntohl(auth_stat)); /* status */
|
||||
svc_putu32(resv, rqstp->rq_auth_stat); /* status */
|
||||
goto sendit;
|
||||
|
||||
err_bad_prog:
|
||||
|
|
|
@@ -59,12 +59,12 @@ svc_put_auth_ops(struct auth_ops *aops)
 }
 
 int
-svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
+svc_authenticate(struct svc_rqst *rqstp)
 {
 	rpc_authflavor_t	flavor;
 	struct auth_ops		*aops;
 
-	*authp = rpc_auth_ok;
+	rqstp->rq_auth_stat = rpc_auth_ok;
 
 	flavor = svc_getnl(&rqstp->rq_arg.head[0]);
 
@@ -72,7 +72,7 @@ svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
 
 	aops = svc_get_auth_ops(flavor);
 	if (aops == NULL) {
-		*authp = rpc_autherr_badcred;
+		rqstp->rq_auth_stat = rpc_autherr_badcred;
 		return SVC_DENIED;
 	}
 
@@ -80,7 +80,7 @@ svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
 	init_svc_cred(&rqstp->rq_cred);
 
 	rqstp->rq_authop = aops;
-	return aops->accept(rqstp, authp);
+	return aops->accept(rqstp);
 }
 EXPORT_SYMBOL_GPL(svc_authenticate);
 
@ -681,8 +681,9 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
|
|||
|
||||
rqstp->rq_client = NULL;
|
||||
if (rqstp->rq_proc == 0)
|
||||
return SVC_OK;
|
||||
goto out;
|
||||
|
||||
rqstp->rq_auth_stat = rpc_autherr_badcred;
|
||||
ipm = ip_map_cached_get(xprt);
|
||||
if (ipm == NULL)
|
||||
ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
|
||||
|
@ -719,13 +720,16 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
|
|||
put_group_info(cred->cr_group_info);
|
||||
cred->cr_group_info = gi;
|
||||
}
|
||||
|
||||
out:
|
||||
rqstp->rq_auth_stat = rpc_auth_ok;
|
||||
return SVC_OK;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
|
||||
|
||||
static int
|
||||
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
|
||||
svcauth_null_accept(struct svc_rqst *rqstp)
|
||||
{
|
||||
struct kvec *argv = &rqstp->rq_arg.head[0];
|
||||
struct kvec *resv = &rqstp->rq_res.head[0];
|
||||
|
@ -736,12 +740,12 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
|
|||
|
||||
if (svc_getu32(argv) != 0) {
|
||||
dprintk("svc: bad null cred\n");
|
||||
*authp = rpc_autherr_badcred;
|
||||
rqstp->rq_auth_stat = rpc_autherr_badcred;
|
||||
return SVC_DENIED;
|
||||
}
|
||||
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
|
||||
dprintk("svc: bad null verf\n");
|
||||
*authp = rpc_autherr_badverf;
|
||||
rqstp->rq_auth_stat = rpc_autherr_badverf;
|
||||
return SVC_DENIED;
|
||||
}
|
||||
|
||||
|
@ -785,7 +789,7 @@ struct auth_ops svcauth_null = {
|
|||
|
||||
|
||||
static int
|
||||
svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
|
||||
svcauth_unix_accept(struct svc_rqst *rqstp)
|
||||
{
|
||||
struct kvec *argv = &rqstp->rq_arg.head[0];
|
||||
struct kvec *resv = &rqstp->rq_res.head[0];
|
||||
|
@ -827,7 +831,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
|
|||
}
|
||||
groups_sort(cred->cr_group_info);
|
||||
if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
|
||||
*authp = rpc_autherr_badverf;
|
||||
rqstp->rq_auth_stat = rpc_autherr_badverf;
|
||||
return SVC_DENIED;
|
||||
}
|
||||
|
||||
|
@ -839,7 +843,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
|
|||
return SVC_OK;
|
||||
|
||||
badcred:
|
||||
*authp = rpc_autherr_badcred;
|
||||
rqstp->rq_auth_stat = rpc_autherr_badcred;
|
||||
return SVC_DENIED;
|
||||
}
|
||||
|
||||
|
|
|
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -100,6 +100,28 @@ static ssize_t rpc_sysfs_xprt_dstaddr_show(struct kobject *kobj,
         return ret + 1;
 }
 
+static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj,
+                                           struct kobj_attribute *attr,
+                                           char *buf)
+{
+        struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
+        struct sockaddr_storage saddr;
+        struct sock_xprt *sock;
+        ssize_t ret = -1;
+
+        if (!xprt)
+                return 0;
+
+        sock = container_of(xprt, struct sock_xprt, xprt);
+        if (kernel_getsockname(sock->sock, (struct sockaddr *)&saddr) < 0)
+                goto out;
+
+        ret = sprintf(buf, "%pISc\n", &saddr);
+out:
+        xprt_put(xprt);
+        return ret + 1;
+}
+
 static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj,
                                         struct kobj_attribute *attr,
                                         char *buf)
@@ -114,14 +136,16 @@ static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj,
                        "max_num_slots=%u\nmin_num_slots=%u\nnum_reqs=%u\n"
                        "binding_q_len=%u\nsending_q_len=%u\npending_q_len=%u\n"
                        "backlog_q_len=%u\nmain_xprt=%d\nsrc_port=%u\n"
-                       "tasks_queuelen=%ld\n",
+                       "tasks_queuelen=%ld\ndst_port=%s\n",
                        xprt->last_used, xprt->cong, xprt->cwnd, xprt->max_reqs,
                        xprt->min_reqs, xprt->num_reqs, xprt->binding.qlen,
                        xprt->sending.qlen, xprt->pending.qlen,
                        xprt->backlog.qlen, xprt->main,
                        (xprt->xprt_class->ident == XPRT_TRANSPORT_TCP) ?
                                 get_srcport(xprt) : 0,
-                       atomic_long_read(&xprt->queuelen));
+                       atomic_long_read(&xprt->queuelen),
+                       (xprt->xprt_class->ident == XPRT_TRANSPORT_TCP) ?
+                                xprt->address_strings[RPC_DISPLAY_PORT] : "0");
         xprt_put(xprt);
         return ret + 1;
 }
@@ -183,8 +207,10 @@ static ssize_t rpc_sysfs_xprt_switch_info_show(struct kobject *kobj,
 
         if (!xprt_switch)
                 return 0;
-        ret = sprintf(buf, "num_xprts=%u\nnum_active=%u\nqueue_len=%ld\n",
+        ret = sprintf(buf, "num_xprts=%u\nnum_active=%u\n"
+                      "num_unique_destaddr=%u\nqueue_len=%ld\n",
                       xprt_switch->xps_nxprts, xprt_switch->xps_nactive,
+                      xprt_switch->xps_nunique_destaddr_xprts,
                       atomic_long_read(&xprt_switch->xps_queuelen));
         xprt_switch_put(xprt_switch);
         return ret + 1;
@@ -376,6 +402,9 @@ static const void *rpc_sysfs_xprt_namespace(struct kobject *kobj)
 static struct kobj_attribute rpc_sysfs_xprt_dstaddr = __ATTR(dstaddr,
         0644, rpc_sysfs_xprt_dstaddr_show, rpc_sysfs_xprt_dstaddr_store);
 
+static struct kobj_attribute rpc_sysfs_xprt_srcaddr = __ATTR(srcaddr,
+        0644, rpc_sysfs_xprt_srcaddr_show, NULL);
+
 static struct kobj_attribute rpc_sysfs_xprt_info = __ATTR(xprt_info,
         0444, rpc_sysfs_xprt_info_show, NULL);
 
@@ -384,6 +413,7 @@ static struct kobj_attribute rpc_sysfs_xprt_change_state = __ATTR(xprt_state,
 
 static struct attribute *rpc_sysfs_xprt_attrs[] = {
         &rpc_sysfs_xprt_dstaddr.attr,
+        &rpc_sysfs_xprt_srcaddr.attr,
         &rpc_sysfs_xprt_info.attr,
         &rpc_sysfs_xprt_change_state.attr,
         NULL,
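Purely for illustration, with every value invented (the field names come straight from the format strings above): the tail of the per-transport xprt_info file now carries the remote port, and xprt_switch_info gains the count of transports with unique destination addresses.

        main_xprt=1
        src_port=857
        tasks_queuelen=0
        dst_port=2049

        num_xprts=2
        num_active=2
        num_unique_destaddr=1
        queue_len=0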
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -761,6 +761,20 @@ void xprt_disconnect_done(struct rpc_xprt *xprt)
 }
 EXPORT_SYMBOL_GPL(xprt_disconnect_done);
 
+/**
+ * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
+ * @xprt: transport to disconnect
+ */
+static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
+{
+        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
+        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
+                queue_work(xprtiod_workqueue, &xprt->task_cleanup);
+        else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
+                rpc_wake_up_queued_task_set_status(&xprt->pending,
+                                                   xprt->snd_task, -ENOTCONN);
+}
+
 /**
  * xprt_force_disconnect - force a transport to disconnect
  * @xprt: transport to disconnect
@@ -772,13 +786,7 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
 
         /* Don't race with the test_bit() in xprt_clear_locked() */
         spin_lock(&xprt->transport_lock);
-        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
-        /* Try to schedule an autoclose RPC call */
-        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
-                queue_work(xprtiod_workqueue, &xprt->task_cleanup);
-        else if (xprt->snd_task)
-                rpc_wake_up_queued_task_set_status(&xprt->pending,
-                                xprt->snd_task, -ENOTCONN);
+        xprt_schedule_autoclose_locked(xprt);
         spin_unlock(&xprt->transport_lock);
 }
 EXPORT_SYMBOL_GPL(xprt_force_disconnect);
@@ -818,11 +826,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
                 goto out;
         if (test_bit(XPRT_CLOSING, &xprt->state))
                 goto out;
-        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
-        /* Try to schedule an autoclose RPC call */
-        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
-                queue_work(xprtiod_workqueue, &xprt->task_cleanup);
-        xprt_wake_pending_tasks(xprt, -EAGAIN);
+        xprt_schedule_autoclose_locked(xprt);
 out:
         spin_unlock(&xprt->transport_lock);
 }
@@ -880,12 +884,14 @@ bool xprt_lock_connect(struct rpc_xprt *xprt,
                 goto out;
         if (xprt->snd_task != task)
                 goto out;
+        set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
         xprt->snd_task = cookie;
         ret = true;
 out:
         spin_unlock(&xprt->transport_lock);
         return ret;
 }
+EXPORT_SYMBOL_GPL(xprt_lock_connect);
 
 void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
 {
@@ -895,12 +901,14 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
         if (!test_bit(XPRT_LOCKED, &xprt->state))
                 goto out;
         xprt->snd_task =NULL;
+        clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
         xprt->ops->release_xprt(xprt, NULL);
         xprt_schedule_autodisconnect(xprt);
 out:
         spin_unlock(&xprt->transport_lock);
         wake_up_bit(&xprt->state, XPRT_LOCKED);
 }
+EXPORT_SYMBOL_GPL(xprt_unlock_connect);
 
 /**
  * xprt_connect - schedule a transport connect operation
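A rough usage sketch (not code from the series; example_setup_connect() and example_connect_done() are made-up wrappers modelled on the xprtrdma hunks further down): a transport's connect path pins the transport under an opaque cookie, and because that cookie is not a struct rpc_task, the new XPRT_SND_IS_COOKIE bit keeps xprt_schedule_autoclose_locked() from trying to wake it as if it were one.

static void example_setup_connect(struct rpc_xprt *xprt,
                                  struct rpc_task *task, void *cookie)
{
        /* Pin the transport to @cookie for the duration of the connect. */
        if (WARN_ON_ONCE(!xprt_lock_connect(xprt, task, cookie)))
                return;
        /* ... kick off the asynchronous connection attempt ... */
}

static void example_connect_done(struct rpc_xprt *xprt, void *cookie)
{
        /* Drop the pin once the connection attempt has completed. */
        xprt_unlock_connect(xprt, cookie);
}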
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -139,6 +139,7 @@ struct rpc_xprt_switch *xprt_switch_alloc(struct rpc_xprt *xprt,
                 xps->xps_iter_ops = &rpc_xprt_iter_singular;
                 rpc_sysfs_xprt_switch_setup(xps, xprt, gfp_flags);
                 xprt_switch_add_xprt_locked(xps, xprt);
+                xps->xps_nunique_destaddr_xprts = 1;
                 rpc_sysfs_xprt_setup(xps, xprt, gfp_flags);
         }
 
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -115,7 +115,7 @@ int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
         if (rc < 0)
                 goto failed_marshal;
 
-        if (rpcrdma_post_sends(r_xprt, req))
+        if (frwr_send(r_xprt, req))
                 goto drop_connection;
         return 0;
 
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -394,6 +394,7 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
         struct rpcrdma_ep *ep = r_xprt->rx_ep;
         struct rpcrdma_mr *mr;
         unsigned int num_wrs;
+        int ret;
 
         num_wrs = 1;
         post_wr = send_wr;
@@ -420,7 +421,10 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
         }
 
         trace_xprtrdma_post_send(req);
-        return ib_post_send(ep->re_id->qp, post_wr, NULL);
+        ret = ib_post_send(ep->re_id->qp, post_wr, NULL);
+        if (ret)
+                trace_xprtrdma_post_send_err(r_xprt, req, ret);
+        return ret;
 }
 
 /**
@@ -557,6 +561,10 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 
         /* On error, the MRs get destroyed once the QP has drained. */
         trace_xprtrdma_post_linv_err(req, rc);
+
+        /* Force a connection loss to ensure complete recovery.
+         */
+        rpcrdma_force_disconnect(ep);
 }
 
 /**
@@ -653,4 +661,8 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
          * retransmission.
          */
         rpcrdma_unpin_rqst(req->rl_reply);
+
+        /* Force a connection loss to ensure complete recovery.
+         */
+        rpcrdma_force_disconnect(ep);
 }
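To spell out the calling convention (this only restates the backchannel.c and transport.c hunks in this pull, nothing new): with the rpcrdma_post_sends() wrapper gone, callers invoke frwr_send() directly and treat any nonzero return, now the raw ib_post_send() status rather than a translated -ENOTCONN, as a reason to drop the connection and let the RPC layer retransmit.

        if (frwr_send(r_xprt, req))
                goto drop_connection;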
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -250,12 +250,9 @@ xprt_rdma_connect_worker(struct work_struct *work)
                                            xprt->stat.connect_start;
                 xprt_set_connected(xprt);
                 rc = -EAGAIN;
-        } else {
-                /* Force a call to xprt_rdma_close to clean up */
-                spin_lock(&xprt->transport_lock);
-                set_bit(XPRT_CLOSE_WAIT, &xprt->state);
-                spin_unlock(&xprt->transport_lock);
-        }
+        } else
+                rpcrdma_xprt_disconnect(r_xprt);
+        xprt_unlock_connect(xprt, r_xprt);
         xprt_wake_pending_tasks(xprt, rc);
 }
 
@@ -489,6 +486,8 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
         struct rpcrdma_ep *ep = r_xprt->rx_ep;
         unsigned long delay;
 
+        WARN_ON_ONCE(!xprt_lock_connect(xprt, task, r_xprt));
+
         delay = 0;
         if (ep && ep->re_connect_status != 0) {
                 delay = xprt_reconnect_delay(xprt);
@@ -661,7 +660,7 @@ xprt_rdma_send_request(struct rpc_rqst *rqst)
                 goto drop_connection;
         rqst->rq_xtime = ktime_get();
 
-        if (rpcrdma_post_sends(r_xprt, req))
+        if (frwr_send(r_xprt, req))
                 goto drop_connection;
 
         rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -124,7 +124,7 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
  * connection is closed or lost. (The important thing is it needs
  * to be invoked "at least" once).
  */
-static void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
+void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
 {
         if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
                 xprt_force_disconnect(ep->re_xprt);
@@ -1349,21 +1349,6 @@ static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb)
         kfree(rb);
 }
 
-/**
- * rpcrdma_post_sends - Post WRs to a transport's Send Queue
- * @r_xprt: controlling transport instance
- * @req: rpcrdma_req containing the Send WR to post
- *
- * Returns 0 if the post was successful, otherwise -ENOTCONN
- * is returned.
- */
-int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
-{
-        if (frwr_send(r_xprt, req))
-                return -ENOTCONN;
-        return 0;
-}
-
 /**
  * rpcrdma_post_recvs - Refill the Receive Queue
  * @r_xprt: controlling transport instance
@@ -1416,12 +1401,8 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
 
         rc = ib_post_recv(ep->re_id->qp, wr,
                           (const struct ib_recv_wr **)&bad_wr);
-        if (atomic_dec_return(&ep->re_receiving) > 0)
-                complete(&ep->re_done);
-
-out:
-        trace_xprtrdma_post_recvs(r_xprt, count, rc);
         if (rc) {
+                trace_xprtrdma_post_recvs_err(r_xprt, rc);
                 for (wr = bad_wr; wr;) {
                         struct rpcrdma_rep *rep;
 
@@ -1431,6 +1412,11 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp)
                         --count;
                 }
         }
+        if (atomic_dec_return(&ep->re_receiving) > 0)
+                complete(&ep->re_done);
+
+out:
+        trace_xprtrdma_post_recvs(r_xprt, count);
         ep->re_receive_count += count;
         return;
 }
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -454,11 +454,11 @@ extern unsigned int xprt_rdma_memreg_strategy;
 /*
  * Endpoint calls - xprtrdma/verbs.c
  */
+void rpcrdma_force_disconnect(struct rpcrdma_ep *ep);
 void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
 int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
 void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
 
-int rpcrdma_post_sends(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
 void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
 
 /*
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1656,7 +1656,7 @@ static int xs_get_srcport(struct sock_xprt *transport)
 unsigned short get_srcport(struct rpc_xprt *xprt)
 {
         struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
-        return sock->srcport;
+        return xs_sock_getport(sock->sock);
 }
 EXPORT_SYMBOL(get_srcport);
 
@@ -2099,13 +2099,20 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt)
 
         if (sock == NULL)
                 return;
+        if (!xprt->reuseport) {
+                xs_close(xprt);
+                return;
+        }
         switch (skst) {
-        default:
+        case TCP_FIN_WAIT1:
+        case TCP_FIN_WAIT2:
+                break;
+        case TCP_ESTABLISHED:
+        case TCP_CLOSE_WAIT:
                 kernel_sock_shutdown(sock, SHUT_RDWR);
                 trace_rpc_socket_shutdown(xprt, sock);
                 break;
-        case TCP_CLOSE:
-        case TCP_TIME_WAIT:
+        default:
                 xs_reset_transport(transport);
         }
 }