xprtrdma: Refactor frwr_is_supported
Refactor: Perform the "is supported" check in rpcrdma_ep_create() instead
of in rpcrdma_ia_open(). frwr_open() is where most of the logic to query
device attributes is already located.

The current code displays a redundant error message when the device does
not support FRWR. As an additional clean-up, this patch removes the extra
message.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 18d065a5d4
commit 25868e610a
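For orientation before the hunks, here is a condensed, illustrative sketch of the call path this patch creates, assembled from the diff below. It is not a compilable excerpt: surrounding declarations and unrelated setup are elided and marked with comments.

/* net/sunrpc/xprtrdma/verbs.c -- rpcrdma_ep_create() now triggers the check */
int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
{
	/* ... endpoint setup and local declarations elided ... */
	rc = frwr_query_device(r_xprt, ia->ri_id->device);
	if (rc)
		return rc;
	/* ... */
}

/* net/sunrpc/xprtrdma/frwr_ops.c -- frwr_open() renamed; the capability
 * test that used to live in frwr_is_supported() is folded in here, so
 * only one error message is emitted when FRWR is unsupported.
 */
int frwr_query_device(struct rpcrdma_xprt *r_xprt,
		      const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}
	/* ... remaining device attribute queries elided ... */
}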
net/sunrpc/xprtrdma/frwr_ops.c

@@ -50,28 +50,6 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
-/**
- * frwr_is_supported - Check if device supports FRWR
- * @device: interface adapter to check
- *
- * Returns true if device supports FRWR, otherwise false
- */
-bool frwr_is_supported(struct ib_device *device)
-{
-	struct ib_device_attr *attrs = &device->attrs;
-
-	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
-		goto out_not_supported;
-	if (attrs->max_fast_reg_page_list_len == 0)
-		goto out_not_supported;
-	return true;
-
-out_not_supported:
-	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
-		device->name);
-	return false;
-}
-
 /**
  * frwr_release_mr - Destroy one MR
  * @mr: MR allocated by frwr_init_mr
@@ -170,13 +148,12 @@ int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
 }
 
 /**
- * frwr_open - Prepare an endpoint for use with FRWR
- * @ia: interface adapter this endpoint will use
- * @ep: endpoint to prepare
+ * frwr_query_device - Prepare a transport for use with FRWR
+ * @r_xprt: controlling transport instance
+ * @device: RDMA device to query
  *
  * On success, sets:
- *	ep->rep_attr.cap.max_send_wr
- *	ep->rep_attr.cap.max_recv_wr
+ *	ep->rep_attr
  *	ep->rep_max_requests
  *	ia->ri_max_rdma_segs
  *
@@ -184,14 +161,27 @@ int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
  *	ia->ri_max_frwr_depth
  *	ia->ri_mrtype
  *
- * On failure, a negative errno is returned.
+ * Return values:
+ *   On success, returns zero.
+ *   %-EINVAL - the device does not support FRWR memory registration
+ *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
  */
-int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
+int frwr_query_device(struct rpcrdma_xprt *r_xprt,
+		      const struct ib_device *device)
 {
-	struct ib_device_attr *attrs = &ia->ri_id->device->attrs;
+	const struct ib_device_attr *attrs = &device->attrs;
+	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
 	int max_qp_wr, depth, delta;
 	unsigned int max_sge;
 
+	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
+	    attrs->max_fast_reg_page_list_len == 0) {
+		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
+		       device->name);
+		return -EINVAL;
+	}
+
 	max_sge = min_t(unsigned int, attrs->max_send_sge,
 			RPCRDMA_MAX_SEND_SGES);
 	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
@@ -238,7 +228,7 @@ int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
 		} while (delta > 0);
 	}
 
-	max_qp_wr = ia->ri_id->device->attrs.max_qp_wr;
+	max_qp_wr = attrs->max_qp_wr;
 	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
 	max_qp_wr -= 1;
 	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
@@ -249,7 +239,7 @@ int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep)
 	if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
 		ep->rep_max_requests = max_qp_wr / depth;
 		if (!ep->rep_max_requests)
-			return -EINVAL;
+			return -ENOMEM;
 		ep->rep_attr.cap.max_send_wr = ep->rep_max_requests * depth;
 	}
 	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
net/sunrpc/xprtrdma/verbs.c

@@ -368,18 +368,6 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
 		goto out_err;
 	}
 
-	switch (xprt_rdma_memreg_strategy) {
-	case RPCRDMA_FRWR:
-		if (frwr_is_supported(ia->ri_id->device))
-			break;
-		/*FALLTHROUGH*/
-	default:
-		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
-		       ia->ri_id->device->name, xprt_rdma_memreg_strategy);
-		rc = -EINVAL;
-		goto out_err;
-	}
-
 	return 0;
 
 out_err:
@@ -479,7 +467,7 @@ int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
 	ep->rep_inline_send = xprt_rdma_max_inline_write;
 	ep->rep_inline_recv = xprt_rdma_max_inline_read;
 
-	rc = frwr_open(ia, ep);
+	rc = frwr_query_device(r_xprt, ia->ri_id->device);
 	if (rc)
 		return rc;
 	r_xprt->rx_buf.rb_max_requests = cpu_to_be32(ep->rep_max_requests);
net/sunrpc/xprtrdma/xprt_rdma.h

@@ -534,9 +534,9 @@ rpcrdma_data_dir(bool writing)
 
 /* Memory registration calls xprtrdma/frwr_ops.c
  */
-bool frwr_is_supported(struct ib_device *device);
 void frwr_reset(struct rpcrdma_req *req);
-int frwr_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep);
+int frwr_query_device(struct rpcrdma_xprt *r_xprt,
+		      const struct ib_device *device);
 int frwr_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr);
 void frwr_release_mr(struct rpcrdma_mr *mr);
 struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,