i40e: skb->xmit_more support
Supporting skb->xmit_more in i40e is straightforward: we need to move the i40e_maybe_stop_tx() call so that netif_xmit_stopped() is tested correctly before deciding not to kick the NIC.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 44783d8751
commit 4567dc1093
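The change rides on the generic xmit_more contract: when the stack sets skb->xmit_more it promises more packets for this queue, so the driver may post descriptors without the expensive MMIO doorbell write and let the last packet of the batch flush them all. The ordering subtlety this patch addresses is that the ring-space stop check must run before the decision to skip the kick. Below is a minimal sketch of that ordering for a generic multiqueue driver; the mydrv_* types and helpers are hypothetical stand-ins (only skb->xmit_more, writel(), netif_xmit_stopped() and netdev_get_tx_queue() are real kernel interfaces):

    #include <linux/io.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical per-queue Tx ring; descriptor bookkeeping elided. */
    struct mydrv_ring {
            u16 queue_index;        /* stack Tx queue this ring backs */
            void __iomem *tail;     /* doorbell register */
    };

    static netdev_tx_t mydrv_xmit_frame(struct sk_buff *skb,
                                        struct net_device *dev)
    {
            struct mydrv_ring *ring = mydrv_select_ring(dev, skb); /* hypothetical */
            u16 next = mydrv_post_descriptors(ring, skb);          /* hypothetical */

            /* Stop the subqueue first if this packet left too few free
             * descriptors for a worst-case follow-up frame.
             */
            mydrv_maybe_stop_tx(ring, MYDRV_DESC_NEEDED);          /* hypothetical */

            /* Only now decide whether to skip the doorbell: if the check
             * above just stopped the queue, netif_xmit_stopped() is true
             * and we kick the NIC even though skb->xmit_more promised
             * more packets, so nothing posted so far can be stranded.
             */
            if (!skb->xmit_more ||
                netif_xmit_stopped(netdev_get_tx_queue(dev, ring->queue_index)))
                    writel(next, ring->tail);

            return NETDEV_TX_OK;
    }

Testing in the opposite order, netif_xmit_stopped() before the stop check, would let a ring-filling packet skip the kick and then stop the queue, leaving posted descriptors that hardware is never told about.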
@@ -2052,6 +2052,47 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
 	context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
+/**
+ * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
+	smp_mb();
+
+	/* Check again in a case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+/**
+ * i40e_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+#ifdef I40E_FCOE
+int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#else
+static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#endif
+{
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40e_maybe_stop_tx(tx_ring, size);
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on
@@ -2195,8 +2236,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	tx_ring->next_to_use = i;
 
+	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 	/* notify HW of packet */
-	writel(i, tx_ring->tail);
+	if (!skb->xmit_more ||
+	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						   tx_ring->queue_index)))
+		writel(i, tx_ring->tail);
 
 	return;
 
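The hunk above is the heart of the patch: the stop check now runs inside i40e_tx_map(), immediately before the xmit_more test, because netif_xmit_stopped() must observe any queue stop this very packet just caused. The two large hunks only relocate __i40e_maybe_stop_tx() and i40e_maybe_stop_tx() above i40e_tx_map() so they are defined before the new call site, and the final hunk drops the now-redundant check from i40e_xmit_frame_ring().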
@@ -2217,47 +2262,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	tx_ring->next_to_use = i;
 }
 
-/**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	/* Memory barrier before checking head and tail */
-	smp_mb();
-
-	/* Check again in a case another CPU has just made room available. */
-	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-		return -EBUSY;
-
-	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++tx_ring->tx_stats.restart_queue;
-	return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-#ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-#endif
-{
-	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-		return 0;
-	return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
 /**
  * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb:      send buffer
@@ -2372,8 +2376,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
 		    td_cmd, td_offset);
 
-	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
 	return NETDEV_TX_OK;
 
 out_drop: