SUNRPC: Use RMW bitops in single-threaded hot paths

I noticed CPU pipeline stalls while using perf.

Once an svc thread is scheduled and executing an RPC, no other
processes will touch svc_rqst::rq_flags. Thus bus-locked atomics are
not needed outside the svc thread scheduler.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Author: Chuck Lever <chuck.lever@oracle.com>
Date:   2022-04-29 10:06:21 -04:00
Commit: 28df098881 (parent: bb283ca18d)

7 changed files with 16 additions and 15 deletions
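For readers unfamiliar with the two bitop flavors the diff switches between, here is a minimal sketch (not part of the commit; the wrapper function name is invented for illustration, while the flag and field names are the ones used in the patch):

/* Illustrative sketch only -- not from the patch below. */
#include <linux/bitops.h>
#include <linux/sunrpc/svc.h>

static void rq_flags_bitops_sketch(struct svc_rqst *rqstp)
{
        /*
         * set_bit()/clear_bit(): atomic read-modify-write, implemented
         * with a bus-locked instruction on x86. Needed while another
         * context (e.g. the svc thread scheduler) may touch rq_flags.
         */
        set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);

        /*
         * __set_bit()/__clear_bit(): plain load/modify/store, no lock
         * prefix. Sufficient once the svc thread owns the request
         * exclusively, as in the hot paths changed by this commit.
         */
        __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
}

The atomic variants order the RMW against concurrent updates of the same word; the double-underscore variants trade that guarantee for cheaper code, which is why the patch uses them only after the request has been handed to a single svc thread.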

fs/nfsd/nfs4proc.c

@@ -970,7 +970,7 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
          * the client wants us to do more in this compound:
          */
         if (!nfsd4_last_compound_op(rqstp))
-                clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+                __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
         /* check stateid */
         status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
@@ -2650,11 +2650,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
         cstate->minorversion = args->minorversion;
         fh_init(current_fh, NFS4_FHSIZE);
         fh_init(save_fh, NFS4_FHSIZE);
+
         /*
          * Don't use the deferral mechanism for NFSv4; compounds make it
          * too hard to avoid non-idempotency problems.
          */
-        clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+        __clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
 
         /*
          * According to RFC3010, this takes precedence over all other errors.
@@ -2769,7 +2770,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
 out:
         cstate->status = status;
         /* Reset deferral mechanism for RPC deferrals */
-        set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+        __set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
         return rpc_success;
 }

fs/nfsd/nfs4xdr.c

@@ -2411,7 +2411,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
         argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
         if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
-                clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
+                __clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
 
         return true;
 }

net/sunrpc/auth_gss/svcauth_gss.c

@@ -900,7 +900,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
          * rejecting the server-computed MIC in this somewhat rare case,
          * do not use splice with the GSS integrity service.
          */
-        clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+        __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
         /* Did we already verify the signature on the original pass through? */
         if (rqstp->rq_deferred)
@@ -972,7 +972,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
         int pad, remaining_len, offset;
         u32 rseqno;
 
-        clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+        __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
         priv_len = svc_getnl(&buf->head[0]);
         if (rqstp->rq_deferred) {

net/sunrpc/svc.c

@@ -1244,10 +1244,10 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
                 goto err_short_len;
 
         /* Will be turned off by GSS integrity and privacy services */
-        set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+        __set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
         /* Will be turned off only when NFSv4 Sessions are used */
-        set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
-        clear_bit(RQ_DROPME, &rqstp->rq_flags);
+        __set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+        __clear_bit(RQ_DROPME, &rqstp->rq_flags);
 
         svc_putu32(resv, rqstp->rq_xid);

net/sunrpc/svc_xprt.c

@@ -1238,7 +1238,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
         trace_svc_defer(rqstp);
         svc_xprt_get(rqstp->rq_xprt);
         dr->xprt = rqstp->rq_xprt;
-        set_bit(RQ_DROPME, &rqstp->rq_flags);
+        __set_bit(RQ_DROPME, &rqstp->rq_flags);
 
         dr->handle.revisit = svc_revisit;
         return &dr->handle;

net/sunrpc/svcsock.c

@@ -298,9 +298,9 @@ static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
 static void svc_sock_secure_port(struct svc_rqst *rqstp)
 {
         if (svc_port_is_privileged(svc_addr(rqstp)))
-                set_bit(RQ_SECURE, &rqstp->rq_flags);
+                __set_bit(RQ_SECURE, &rqstp->rq_flags);
         else
-                clear_bit(RQ_SECURE, &rqstp->rq_flags);
+                __clear_bit(RQ_SECURE, &rqstp->rq_flags);
 }
 
 /*
@@ -1008,9 +1008,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
         rqstp->rq_xprt_ctxt = NULL;
         rqstp->rq_prot = IPPROTO_TCP;
         if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
-                set_bit(RQ_LOCAL, &rqstp->rq_flags);
+                __set_bit(RQ_LOCAL, &rqstp->rq_flags);
         else
-                clear_bit(RQ_LOCAL, &rqstp->rq_flags);
+                __clear_bit(RQ_LOCAL, &rqstp->rq_flags);
 
         p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
         calldir = p[1];

net/sunrpc/xprtrdma/svc_rdma_transport.c

@@ -602,7 +602,7 @@ static int svc_rdma_has_wspace(struct svc_xprt *xprt)
 static void svc_rdma_secure_port(struct svc_rqst *rqstp)
 {
-        set_bit(RQ_SECURE, &rqstp->rq_flags);
+        __set_bit(RQ_SECURE, &rqstp->rq_flags);
 }
 
 static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)