iw_cxgb4: Fix cxgb4 arm CQ logic w/IB_CQ_REPORT_MISSED_EVENTS

Currently the cxgb4 arm-CQ logic ignores IB_CQ_REPORT_MISSED_EVENTS when
a completion notification is requested on a CQ. Because of this,
ib_poll_handler() assumes all events have been polled and does not
schedule any further iopoll runs.

This patch adds logic to the cxgb4 ib_req_notify_cq() handler to check
whether the CQ is non-empty and to return accordingly. Based on the
return value of the ib_req_notify_cq() handler, ib_poll_handler() will
schedule another run of the iopoll handler.

Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
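
For context, the polling path in the RDMA core that consumes this return
value looks roughly like the sketch below (paraphrased from
drivers/infiniband/core/cq.c of that era; exact details vary by kernel
version). IB_POLL_FLAGS there is IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS,
so a positive return from the driver's req_notify handler causes another
iopoll run to be scheduled:

static int ib_poll_handler(struct irq_poll *iop, int budget)
{
	struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
	int completed;

	completed = __ib_process_cq(cq, budget);
	if (completed < budget) {
		irq_poll_complete(&cq->iop);
		/*
		 * A positive return here means completions may have been
		 * missed between the last poll and re-arming the CQ, so
		 * schedule another iopoll run instead of going idle.
		 */
		if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
			irq_poll_sched(&cq->iop);
	}

	return completed;
}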
Author: Bharat Potnuri, 2016-08-23 20:27:33 +05:30 (committed by Doug Ledford)
commit cff069b78c
parent faa739fb5d
2 changed files with 10 additions and 5 deletions
drivers/infiniband/hw/cxgb4

@@ -1016,15 +1016,15 @@ int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
 	struct c4iw_cq *chp;
-	int ret;
+	int ret = 0;
 	unsigned long flag;
 
 	chp = to_c4iw_cq(ibcq);
 	spin_lock_irqsave(&chp->lock, flag);
-	ret = t4_arm_cq(&chp->cq,
-			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+	t4_arm_cq(&chp->cq,
+		  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
+		ret = t4_cq_notempty(&chp->cq);
 	spin_unlock_irqrestore(&chp->lock, flag);
-	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
-		ret = 0;
 	return ret;
 }
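
The practical effect shows up in the standard consumer pattern for
IB_CQ_REPORT_MISSED_EVENTS. Below is a minimal sketch of such a consumer
(hypothetical ULP code, not part of this patch; handle_wc() is a placeholder
completion handler): the loop only stops once the re-arm call reports an
empty CQ, which is exactly what the new t4_cq_notempty() check makes
possible for cxgb4.

static void drain_and_rearm_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		/* Drain all completions currently visible to software. */
		while (ib_poll_cq(cq, 1, &wc) > 0)
			handle_wc(&wc);	/* placeholder handler */
		/*
		 * Re-arm the CQ; a positive return with
		 * IB_CQ_REPORT_MISSED_EVENTS means new completions may
		 * already be queued, so poll again rather than sleep.
		 */
	} while (ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS) > 0);
}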

@@ -634,6 +634,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
 	return (CQE_GENBIT(cqe) == cq->gen);
 }
 
+static inline int t4_cq_notempty(struct t4_cq *cq)
+{
+	return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
+}
+
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
 	int ret;
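
The new helper treats the CQ as non-empty when either software-queued CQEs
are still pending (cq->sw_in_use) or the hardware CQE at the current
consumer index is valid, i.e. its generation bit matches the CQ's current
generation as checked by t4_valid_cqe() above.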