xprtrdma: Per-mode handling for Remote Invalidation

Refactoring change: Remote Invalidation is particular to the memory
registration mode that is in use. Use a callout instead of a generic
function to handle Remote Invalidation.

This gets rid of the 8-byte flags field in struct rpcrdma_mw, of
which only a single bit flag has been allocated.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Author:     Chuck Lever <chuck.lever@oracle.com>, 2017-12-14 20:56:26 -05:00
Committed:  Anna Schumaker <Anna.Schumaker@Netapp.com>
Commit:     c34416182f (parent 42b9f5c58a)

4 changed files with 27 additions and 30 deletions
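
The shape of the refactoring, as a minimal user-space C sketch (the type and
function names below are simplified stand-ins, not the kernel's): rather than
one mode-agnostic helper that marks an MW with a flag for later, the reply
path dispatches through the memory registration ops table, and a mode that
supports Remote Invalidation supplies its own handler.

	#include <stdio.h>

	/* Illustrative stand-ins for the rpcrdma types. */
	struct rep {
		unsigned int wc_flags;		/* like rr_wc_flags */
		unsigned int inv_rkey;		/* like rr_inv_rkey */
	};

	#define WC_WITH_INVALIDATE	0x1	/* like IB_WC_WITH_INVALIDATE */

	struct memreg_ops {
		/* per-mode Remote Invalidation callout, like ro_reminv */
		void (*reminv)(struct rep *rep);
	};

	/* FRWR's handler can release the invalidated MR right away. */
	static void frwr_reminv(struct rep *rep)
	{
		printf("frwr: MR with rkey 0x%x was invalidated remotely\n",
		       rep->inv_rkey);
	}

	static const struct memreg_ops frwr_ops = {
		.reminv = frwr_reminv,
	};

	/* Reply path: dispatch through the mode's ops table instead of
	 * calling one generic marker function.
	 */
	static void deferred_completion(const struct memreg_ops *ops,
					struct rep *rep)
	{
		if (rep->wc_flags & WC_WITH_INVALIDATE)
			ops->reminv(rep);
	}

	int main(void)
	{
		struct rep rep = {
			.wc_flags = WC_WITH_INVALIDATE,
			.inv_rkey = 0x42,
		};

		deferred_completion(&frwr_ops, &rep);
		return 0;
	}

The guard in deferred_completion() mirrors the IB_WC_WITH_INVALIDATE test at
the real dispatch site in rpcrdma_deferred_completion(), shown in the diff
below.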

net/sunrpc/xprtrdma/frwr_ops.c

@@ -450,6 +450,26 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 	return ERR_PTR(-ENOTCONN);
 }
 
+/* Handle a remotely invalidated mw on the @mws list
+ */
+static void
+frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mws)
+{
+	struct rpcrdma_mw *mw;
+
+	list_for_each_entry(mw, mws, mw_list)
+		if (mw->mw_handle == rep->rr_inv_rkey) {
+			struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+
+			list_del(&mw->mw_list);
+			mw->frmr.fr_state = FRMR_IS_INVALID;
+			ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+					mw->mw_sg, mw->mw_nents, mw->mw_dir);
+			rpcrdma_put_mw(r_xprt, mw);
+			break;	/* only one invalidated MR per RPC */
+		}
+}
+
 /* Invalidate all memory regions that were registered for "req".
  *
  * Sleeps until it is safe for the host CPU to access the
@@ -478,9 +498,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 	list_for_each_entry(mw, mws, mw_list) {
 		mw->frmr.fr_state = FRMR_IS_INVALID;
 
-		if (mw->mw_flags & RPCRDMA_MW_F_RI)
-			continue;
-
 		f = &mw->frmr;
 		dprintk("RPC: %s: invalidating frmr %p\n",
 			__func__, f);
@@ -553,6 +570,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
 
 const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
 	.ro_map				= frwr_op_map,
+	.ro_reminv			= frwr_op_reminv,
 	.ro_unmap_sync			= frwr_op_unmap_sync,
 	.ro_recover_mr			= frwr_op_recover_mr,
 	.ro_open			= frwr_op_open,
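
The core of frwr_op_reminv() above is a walk-match-unlink idiom: find the one
MW whose handle equals the invalidated rkey, take it off the list, and release
it immediately, since at most one MR per RPC is remotely invalidated. A
minimal user-space sketch of the same idiom, with hypothetical simplified
types in place of struct rpcrdma_mw and the list_head machinery:

	#include <stdint.h>
	#include <stdlib.h>

	/* Simplified stand-in for struct rpcrdma_mw on a singly linked list. */
	struct mw {
		uint32_t handle;	/* like mw_handle */
		struct mw *next;
	};

	/* Scan the registered list for the entry whose handle matches the
	 * remotely invalidated rkey, unlink it, and release it.
	 */
	static void reminv(struct mw **head, uint32_t inv_rkey)
	{
		struct mw **pp;

		for (pp = head; *pp; pp = &(*pp)->next)
			if ((*pp)->handle == inv_rkey) {
				struct mw *mw = *pp;

				*pp = mw->next;	/* like list_del() */
				free(mw);	/* like rpcrdma_put_mw() */
				break;	/* only one invalidated MR per RPC */
			}
	}

	int main(void)
	{
		struct mw *a = malloc(sizeof(*a));
		struct mw *b = malloc(sizeof(*b));
		struct mw *head = a;

		a->handle = 0x10; a->next = b;
		b->handle = 0x42; b->next = NULL;

		reminv(&head, 0x42);	/* unlinks and frees b */
		free(a);
		return 0;
	}

Releasing the MW here, instead of merely flagging it so that
frwr_op_unmap_sync() skips it later, is what lets the patch drop the mw_flags
field entirely.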

net/sunrpc/xprtrdma/rpc_rdma.c

@@ -984,24 +984,6 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 	return fixup_copy_count;
 }
 
-/* Caller must guarantee @rep remains stable during this call.
- */
-static void
-rpcrdma_mark_remote_invalidation(struct list_head *mws,
-				 struct rpcrdma_rep *rep)
-{
-	struct rpcrdma_mw *mw;
-
-	if (!(rep->rr_wc_flags & IB_WC_WITH_INVALIDATE))
-		return;
-
-	list_for_each_entry(mw, mws, mw_list)
-		if (mw->mw_handle == rep->rr_inv_rkey) {
-			mw->mw_flags = RPCRDMA_MW_F_RI;
-			break; /* only one invalidated MR per RPC */
-		}
-}
-
 /* By convention, backchannel calls arrive via rdma_msg type
  * messages, and never populate the chunk lists. This makes
  * the RPC/RDMA header small and fixed in size, so it is
@@ -1339,9 +1321,11 @@ void rpcrdma_deferred_completion(struct work_struct *work)
 	struct rpcrdma_rep *rep =
 			container_of(work, struct rpcrdma_rep, rr_work);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
+	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
 
-	rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
-	rpcrdma_release_rqst(rep->rr_rxprt, req);
+	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
+		r_xprt->rx_ia.ri_ops->ro_reminv(rep, &req->rl_registered);
+	rpcrdma_release_rqst(r_xprt, req);
 	rpcrdma_complete_rqst(rep);
 }

net/sunrpc/xprtrdma/verbs.c

@@ -1307,7 +1307,6 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
 
 	if (!mw)
 		goto out_nomws;
-	mw->mw_flags = 0;
 	return mw;
 
 out_nomws:

net/sunrpc/xprtrdma/xprt_rdma.h

@@ -272,7 +272,6 @@ struct rpcrdma_mw {
 	struct scatterlist	*mw_sg;
 	int			mw_nents;
 	enum dma_data_direction	mw_dir;
-	unsigned long		mw_flags;
 	union {
 		struct rpcrdma_fmr	fmr;
 		struct rpcrdma_frmr	frmr;
@@ -284,11 +283,6 @@ struct rpcrdma_mw {
 	struct list_head	mw_all;
 };
 
-/* mw_flags */
-enum {
-	RPCRDMA_MW_F_RI		= 1,
-};
-
 /*
  * struct rpcrdma_req -- structure central to the request/reply sequence.
  *
@@ -485,6 +479,8 @@ struct rpcrdma_memreg_ops {
 			     (*ro_map)(struct rpcrdma_xprt *,
 				       struct rpcrdma_mr_seg *, int, bool,
 				       struct rpcrdma_mw **);
+	void		(*ro_reminv)(struct rpcrdma_rep *rep,
+				     struct list_head *mws);
 	void		(*ro_unmap_sync)(struct rpcrdma_xprt *,
 					 struct list_head *);
 	void		(*ro_recover_mr)(struct rpcrdma_mw *);