xprtrdma: Serialize credit accounting again
Commit fe97b47cd6 ("xprtrdma: Use workqueue to process RPC/RDMA replies") replaced the reply tasklet with a workqueue that allows RPC replies to be processed in parallel. Thus the credit values in RPC-over-RDMA replies can be applied in a different order than that in which the server sent them.

To fix this, revert commit eba8ff660b ("xprtrdma: Move credit update to RPC reply handler"). Reverting is done by hand to accommodate code changes that have occurred since then.

Fixes: fe97b47cd6 ("xprtrdma: Use workqueue to process . . .")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
commit 23826c7aea
parent 59aa1f9a3c
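The idea the revert restores can be seen in a minimal userspace sketch below (plain C11, not kernel code; the names update_granted_credits, MAX_REQUESTS, and the sample credit values are illustrative only). The receive completion path, which runs in arrival order, parses and clamps the credit grant and publishes it in an atomic; the reply handlers, which may run in parallel on the workqueue, only read the most recently published value when recomputing the congestion window.

/* Minimal sketch of the serialization pattern, under the assumptions above. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_REQUESTS 128               /* stand-in for rb_max_requests */

static atomic_uint rb_credits = 1;     /* "most recent credit grant" */

/* Runs in the (ordered) receive completion context. */
static void update_granted_credits(uint32_t wire_credits)
{
        uint32_t credits = wire_credits;

        if (credits == 0)
                credits = 1;           /* don't deadlock */
        else if (credits > MAX_REQUESTS)
                credits = MAX_REQUESTS;

        atomic_store(&rb_credits, credits);
}

/* Runs in reply-processing context, possibly in parallel and out of order. */
static unsigned int current_credit_grant(void)
{
        return atomic_load(&rb_credits);
}

int main(void)
{
        /* Replies carrying grants 8 then 32 arrive in that order; even if
         * their reply handlers run in the opposite order, the congestion
         * window is derived from the latest published value, 32.
         */
        update_granted_credits(8);
        update_granted_credits(32);
        printf("credit grant = %u\n", current_credit_grant());
        return 0;
}

Because only the ordered completion path writes the atomic, no lock is needed to keep the grant consistent with what the server most recently sent.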
@@ -797,7 +797,6 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 	__be32 *iptr;
 	int rdmalen, status, rmerr;
 	unsigned long cwnd;
-	u32 credits;
 
 	dprintk("RPC:       %s: incoming rep %p\n", __func__, rep);
 
@@ -928,15 +927,9 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 	if (req->rl_nchunks)
 		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);
 
-	credits = be32_to_cpu(headerp->rm_credit);
-	if (credits == 0)
-		credits = 1;	/* don't deadlock */
-	else if (credits > r_xprt->rx_buf.rb_max_requests)
-		credits = r_xprt->rx_buf.rb_max_requests;
-
 	spin_lock_bh(&xprt->transport_lock);
 	cwnd = xprt->cwnd;
-	xprt->cwnd = credits << RPC_CWNDSHIFT;
+	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
 	if (xprt->cwnd > cwnd)
 		xprt_release_rqst_cong(rqst->rq_task);
 
@@ -190,6 +190,28 @@ rpcrdma_receive_worker(struct work_struct *work)
 	rpcrdma_reply_handler(rep);
 }
 
+/* Perform basic sanity checking to avoid using garbage
+ * to update the credit grant value.
+ */
+static void
+rpcrdma_update_granted_credits(struct rpcrdma_rep *rep)
+{
+	struct rpcrdma_msg *rmsgp = rdmab_to_msg(rep->rr_rdmabuf);
+	struct rpcrdma_buffer *buffer = &rep->rr_rxprt->rx_buf;
+	u32 credits;
+
+	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
+		return;
+
+	credits = be32_to_cpu(rmsgp->rm_credit);
+	if (credits == 0)
+		credits = 1;	/* don't deadlock */
+	else if (credits > buffer->rb_max_requests)
+		credits = buffer->rb_max_requests;
+
+	atomic_set(&buffer->rb_credits, credits);
+}
+
 static void
 rpcrdma_recvcq_process_wc(struct ib_wc *wc)
 {
@@ -211,7 +233,8 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc)
 	ib_dma_sync_single_for_cpu(rep->rr_device,
 				   rdmab_addr(rep->rr_rdmabuf),
 				   rep->rr_len, DMA_FROM_DEVICE);
-	prefetch(rdmab_to_msg(rep->rr_rdmabuf));
+
+	rpcrdma_update_granted_credits(rep);
 
 out_schedule:
 	queue_work(rpcrdma_receive_wq, &rep->rr_work);
@@ -330,6 +353,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 connected:
 	dprintk("RPC:       %s: %sconnected\n",
 					__func__, connstate > 0 ? "" : "dis");
+	atomic_set(&xprt->rx_buf.rb_credits, 1);
 	ep->rep_connected = connstate;
 	rpcrdma_conn_func(ep);
 	wake_up_all(&ep->rep_connect_wait);
@@ -943,6 +967,7 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
 	buf->rb_max_requests = r_xprt->rx_data.max_requests;
 	buf->rb_bc_srv_max_requests = 0;
 	spin_lock_init(&buf->rb_lock);
+	atomic_set(&buf->rb_credits, 1);
 
 	rc = ia->ri_ops->ro_init(r_xprt);
 	if (rc)
@@ -311,6 +311,7 @@ struct rpcrdma_buffer {
 	struct list_head	rb_send_bufs;
 	struct list_head	rb_recv_bufs;
 	u32			rb_max_requests;
+	atomic_t		rb_credits;	/* most recent credit grant */
 
 	u32			rb_bc_srv_max_requests;
 	spinlock_t		rb_reqslock;	/* protect rb_allreqs */