IB/hfi1: Close VNIC sdma_progress sleep window
sdma_progress() is called outside the wait lock. This opens a race window: sdma_progress() can return false and the sdma engine can then go idle. If that happens, there will be no more sdma interrupts to cause the wakeup, and the vnic_sdma xmit will hang. Fix by moving the lock to enclose the sdma_progress() call.

Also delete the tx_retry; the need for it was removed by commit bcad29137a ("IB/hfi1: Serve the most starved iowait entry first").

Fixes: 64551ede6c ("IB/hfi1: VNIC SDMA support")
Reviewed-by: Gary Leshner <Gary.S.Leshner@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 5190f052a3
commit 18912c4524
--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
+++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
@@ -57,7 +57,6 @@
 
 #define HFI1_VNIC_TXREQ_NAME_LEN   32
 #define HFI1_VNIC_SDMA_DESC_WTRMRK 64
-#define HFI1_VNIC_SDMA_RETRY_COUNT 1
 
 /*
  * struct vnic_txreq - VNIC transmit descriptor
@@ -67,7 +66,6 @@
  * @pad: pad buffer
  * @plen: pad length
  * @pbc_val: pbc value
- * @retry_count: tx retry count
  */
 struct vnic_txreq {
 	struct sdma_txreq       txreq;
@@ -77,8 +75,6 @@ struct vnic_txreq {
 	unsigned char           pad[HFI1_VNIC_MAX_PAD];
 	u16                     plen;
 	__le64                  pbc_val;
-
-	u32                     retry_count;
 };
 
 static void vnic_sdma_complete(struct sdma_txreq *txreq,
@@ -196,7 +192,6 @@ int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
 	ret = build_vnic_tx_desc(sde, tx, pbc);
 	if (unlikely(ret))
 		goto free_desc;
-	tx->retry_count = 0;
 
 	ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
 			      &tx->txreq, vnic_sdma->pkts_sent);
@@ -238,14 +233,14 @@ static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
 	struct hfi1_vnic_sdma *vnic_sdma =
 		container_of(wait->iow, struct hfi1_vnic_sdma, wait);
 	struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
-	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
 
-	if (sdma_progress(sde, seq, txreq))
-		if (tx->retry_count++ < HFI1_VNIC_SDMA_RETRY_COUNT)
-			return -EAGAIN;
+	write_seqlock(&dev->iowait_lock);
+	if (sdma_progress(sde, seq, txreq)) {
+		write_sequnlock(&dev->iowait_lock);
+		return -EAGAIN;
+	}
 
 	vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
-	write_seqlock(&dev->iowait_lock);
 	if (list_empty(&vnic_sdma->wait.list))
 		iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
 	write_sequnlock(&dev->iowait_lock);