net/mlx4_en: Schedule napi when RX buffers allocation fails
When the system is out of memory, refilling of RX buffers fails while the driver continues to pass the received packets to the kernel stack. At some point, when all RX buffers are depleted, the driver may stall and not recover even when memory for new RX buffers is once again available. This is because the hardware no longer has valid descriptors, so no interrupt will be generated for the driver to return to work in napi context. Fix it by scheduling the napi poll function from the stats_task delayed workqueue, as long as the allocations fail.

Signed-off-by: Ido Shamay <idos@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 07841f9d94
parent c232d8a8bb
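The fix described above boils down to two pieces: a wraparound-safe emptiness test on the RX ring's free-running prod/cons counters, and a periodic pass that reschedules the napi poll for any ring found empty. The following is a minimal userspace sketch of that logic, not driver code: struct rx_ring_model, ring_is_empty() and reschedule_napi() are illustrative stand-ins for the real mlx4_en_rx_ring, mlx4_en_is_ring_empty() and napi_reschedule() that appear in the hunks below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct mlx4_en_rx_ring: only the two
 * free-running counters matter for the emptiness test. */
struct rx_ring_model {
	uint32_t prod;	/* producer counter, never masked */
	uint32_t cons;	/* consumer counter, never masked */
};

static bool ring_is_empty(const struct rx_ring_model *r)
{
	/* Unsigned subtraction stays correct across 32-bit wraparound,
	 * so "prod == cons" reliably means "no RX buffers posted". */
	return (uint32_t)(r->prod - r->cons) == 0;
}

/* Stand-in for napi_reschedule(&priv->rx_cq[ring]->napi). */
static void reschedule_napi(int ring)
{
	printf("ring %d is empty -> reschedule napi poll\n", ring);
}

int main(void)
{
	/* Ring 0 still holds buffers even though its counters wrapped;
	 * ring 1 has drained completely after failed refills. */
	struct rx_ring_model rings[2] = {
		{ .prod = 7, .cons = UINT32_MAX - 9 },	/* 17 buffers posted */
		{ .prod = 1000, .cons = 1000 },		/* empty */
	};
	int ring;

	/* What the periodic service task does after this patch. */
	for (ring = 0; ring < 2; ring++)
		if (ring_is_empty(&rings[ring]))
			reschedule_napi(ring);
	return 0;
}

Only the drained ring triggers the reschedule, which is the per-ring decision mlx4_en_recover_from_oom() makes on each service-task period in the patch below.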
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work)
 	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
 		mlx4_en_ptp_overflow_check(mdev);
 
+	mlx4_en_recover_from_oom(priv);
 	queue_delayed_work(mdev->workqueue, &priv->service_task,
 			   SERVICE_TASK_DELAY);
 }
drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
 	return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
 }
 
+static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
+{
+	BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
+	return ring->prod == ring->cons;
+}
+
 static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
 {
 	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 	       ring->cons, ring->prod);
 
 	/* Unmap and free Rx buffers */
-	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
-	while (ring->cons != ring->prod) {
+	while (!mlx4_en_is_ring_empty(ring)) {
 		index = ring->cons & ring->size_mask;
 		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
 		mlx4_en_free_rx_desc(priv, ring, index);
@@ -491,6 +496,23 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 	return err;
 }
 
+/* We recover from out of memory by scheduling our napi poll
+ * function (mlx4_en_process_cq), which tries to allocate
+ * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
+ */
+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
+{
+	int ring;
+
+	if (!priv->port_up)
+		return;
+
+	for (ring = 0; ring < priv->rx_ring_num; ring++) {
+		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+			napi_reschedule(&priv->rx_cq[ring]->napi);
+	}
+}
+
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 			     struct mlx4_en_rx_ring **pring,
 			     u32 size, u16 stride)
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -774,6 +774,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
 			       struct mlx4_en_tx_ring *ring);
 void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring **pring,
 			   u32 size, u16 stride, int node);