IB/mlx5: Don't return errors from poll_cq

Remove error returns from the mlx5 poll_cq function. By the design of
the Mellanox HCA architecture and the corresponding driver, the CQ
polling operation in the kernel never fails.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
Leon Romanovsky 2016-08-28 10:58:38 +03:00 committed by Doug Ledford
parent d9f88e5ab9
commit dbdf7d4e7f
1 changed file with 2 additions and 20 deletions

View File

@ -553,12 +553,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
* from the table. * from the table.
*/ */
mqp = __mlx5_qp_lookup(dev->mdev, qpn); mqp = __mlx5_qp_lookup(dev->mdev, qpn);
if (unlikely(!mqp)) {
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
cq->mcq.cqn, qpn);
return -EINVAL;
}
*cur_qp = to_mibqp(mqp); *cur_qp = to_mibqp(mqp);
} }
@ -619,13 +613,6 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
read_lock(&dev->mdev->priv.mkey_table.lock); read_lock(&dev->mdev->priv.mkey_table.lock);
mmkey = __mlx5_mr_lookup(dev->mdev, mmkey = __mlx5_mr_lookup(dev->mdev,
mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey))); mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
if (unlikely(!mmkey)) {
read_unlock(&dev->mdev->priv.mkey_table.lock);
mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
return -EINVAL;
}
mr = to_mibmr(mmkey); mr = to_mibmr(mmkey);
get_sig_err_item(sig_err_cqe, &mr->sig->err_item); get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
mr->sig->sig_err_exists = true; mr->sig->sig_err_exists = true;
@ -676,7 +663,6 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
unsigned long flags; unsigned long flags;
int soft_polled = 0; int soft_polled = 0;
int npolled; int npolled;
int err = 0;
spin_lock_irqsave(&cq->lock, flags); spin_lock_irqsave(&cq->lock, flags);
if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@ -688,8 +674,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
soft_polled = poll_soft_wc(cq, num_entries, wc); soft_polled = poll_soft_wc(cq, num_entries, wc);
for (npolled = 0; npolled < num_entries - soft_polled; npolled++) { for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled); if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
if (err)
break; break;
} }
@ -698,10 +683,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
out: out:
spin_unlock_irqrestore(&cq->lock, flags); spin_unlock_irqrestore(&cq->lock, flags);
if (err == 0 || err == -EAGAIN)
return soft_polled + npolled; return soft_polled + npolled;
else
return err;
} }
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)