IB/core: Add support for draining IB_POLL_DIRECT completion queues

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Steve Wise <swise@opengridcomputing.com>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
Bart Van Assche 2017-02-14 10:56:35 -08:00 committed by Doug Ledford
parent b02c15360b
commit f039f44fc3
2 changed files with 17 additions and 22 deletions

View File

@@ -58,8 +58,8 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
  * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different
  * context and does not ask for completion interrupts from the HCA.
  *
- * Note: for compatibility reasons -1 can be passed in %budget for unlimited
- * polling.  Do not use this feature in new code, it will be removed soon.
+ * Note: do not pass -1 as %budget unless it is guaranteed that the number
+ * of completions that will be processed is small.
  */
 int ib_process_cq_direct(struct ib_cq *cq, int budget)
 {

View File

@@ -1948,17 +1948,12 @@ static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
  */
 static void __ib_drain_sq(struct ib_qp *qp)
 {
+	struct ib_cq *cq = qp->send_cq;
 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 	struct ib_drain_cqe sdrain;
 	struct ib_send_wr swr = {}, *bad_swr;
 	int ret;
 
-	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
-		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
-			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
-		return;
-	}
-
 	swr.wr_cqe = &sdrain.cqe;
 	sdrain.cqe.done = ib_drain_qp_done;
 	init_completion(&sdrain.done);
@@ -1975,7 +1970,11 @@ static void __ib_drain_sq(struct ib_qp *qp)
 		return;
 	}
 
-	wait_for_completion(&sdrain.done);
+	if (cq->poll_ctx == IB_POLL_DIRECT)
+		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
+			ib_process_cq_direct(cq, -1);
+	else
+		wait_for_completion(&sdrain.done);
 }
 
 /*
@@ -1983,17 +1982,12 @@ static void __ib_drain_sq(struct ib_qp *qp)
  */
 static void __ib_drain_rq(struct ib_qp *qp)
 {
+	struct ib_cq *cq = qp->recv_cq;
 	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 	struct ib_drain_cqe rdrain;
 	struct ib_recv_wr rwr = {}, *bad_rwr;
 	int ret;
 
-	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
-		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
-			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
-		return;
-	}
-
 	rwr.wr_cqe = &rdrain.cqe;
 	rdrain.cqe.done = ib_drain_qp_done;
 	init_completion(&rdrain.done);
@@ -2010,7 +2004,11 @@ static void __ib_drain_rq(struct ib_qp *qp)
 		return;
 	}
 
-	wait_for_completion(&rdrain.done);
+	if (cq->poll_ctx == IB_POLL_DIRECT)
+		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
+			ib_process_cq_direct(cq, -1);
+	else
+		wait_for_completion(&rdrain.done);
 }
 
 /**
@@ -2027,8 +2025,7 @@ static void __ib_drain_rq(struct ib_qp *qp)
  * ensure there is room in the CQ and SQ for the drain work request and
  * completion.
  *
- * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
- * IB_POLL_DIRECT.
+ * allocate the CQ using ib_alloc_cq().
  *
  * ensure that there are no other contexts that are posting WRs concurrently.
  * Otherwise the drain is not guaranteed.
@@ -2056,8 +2053,7 @@ EXPORT_SYMBOL(ib_drain_sq);
  * ensure there is room in the CQ and RQ for the drain work request and
  * completion.
  *
- * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
- * IB_POLL_DIRECT.
+ * allocate the CQ using ib_alloc_cq().
  *
  * ensure that there are no other contexts that are posting WRs concurrently.
  * Otherwise the drain is not guaranteed.
@@ -2081,8 +2077,7 @@ EXPORT_SYMBOL(ib_drain_rq);
  * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
  * and completions.
  *
- * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
- * IB_POLL_DIRECT.
+ * allocate the CQs using ib_alloc_cq().
  *
  * ensure that there are no other contexts that are posting WRs concurrently.
  * Otherwise the drain is not guaranteed.