ixgbevf: don't bother clearing tx_buffer_info in ixgbevf_clean_tx_ring()

In the case of the Tx rings we only need to clear the Tx buffer_info when
we are resetting the rings.  Ideally we do this when we configure the ring
to bring it back up, rather than when we are taking it down, in order to
avoid dirtying pages we don't need to.
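As a sketch, the clear then happens at ring bring-up rather than teardown;
this is the pattern the ixgbevf_configure_tx_ring() hunk below implements
("ring" is the driver's ixgbevf_ring):

	/* zero the software buffer state when (re)configuring the ring,
	 * so the teardown path never has to dirty these pages
	 */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct ixgbevf_tx_buffer) * ring->count);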

In addition, we don't need to clear the Tx descriptor ring, since we will
fully repopulate it when we begin transmitting frames.  Instead of touching
anything in the Tx descriptor ring, next_to_watch can be cleared to prevent
the ring from being cleaned beyond that point.
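For reference, the transmit path is what arms next_to_watch in the first
place; a condensed sketch of that hand-off, simplified from
ixgbevf_tx_map() (not part of this diff):

	/* after writing the last fragment's descriptor, publish the
	 * end-of-packet descriptor on the packet's first buffer;
	 * cleanup stops at buffers whose next_to_watch is NULL
	 */
	first->next_to_watch = tx_desc;
	tx_ring->next_to_use = i;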

Finally, with these changes we can avoid having to reset the skb member of
the Tx buffer_info structure in the cleanup path, since the skb will always
be associated with the first buffer, which has next_to_watch set.
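Put differently, the invariant the cleanup paths can now rely on is that
the skb and next_to_watch travel together on the packet's first buffer; a
minimal sketch using this driver's identifiers:

	/* only the first buffer of a packet carries the skb, and it is
	 * the same buffer whose next_to_watch is non-NULL, so the skb
	 * is freed exactly once per packet with no per-buffer reset
	 */
	eop_desc = tx_buffer->next_to_watch;
	dev_kfree_skb_any(tx_buffer->skb);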

Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Emil Tantilov 2017-12-11 10:37:31 -08:00 committed by Jeff Kirsher
parent 6f3554548e
commit 865a4d987b
1 changed file with 72 additions and 43 deletions


@@ -206,28 +206,6 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
 	}
 }
 
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
-					       struct ixgbevf_tx_buffer *tx_buffer)
-{
-	if (tx_buffer->skb) {
-		dev_kfree_skb_any(tx_buffer->skb);
-		if (dma_unmap_len(tx_buffer, len))
-			dma_unmap_single(tx_ring->dev,
-					 dma_unmap_addr(tx_buffer, dma),
-					 dma_unmap_len(tx_buffer, len),
-					 DMA_TO_DEVICE);
-	} else if (dma_unmap_len(tx_buffer, len)) {
-		dma_unmap_page(tx_ring->dev,
-			       dma_unmap_addr(tx_buffer, dma),
-			       dma_unmap_len(tx_buffer, len),
-			       DMA_TO_DEVICE);
-	}
-	tx_buffer->next_to_watch = NULL;
-	tx_buffer->skb = NULL;
-	dma_unmap_len_set(tx_buffer, len, 0);
-	/* tx_buffer must be completely set up in the transmit path */
-}
-
 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
 {
 	return ring->stats.packets;
@@ -349,7 +327,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 				 DMA_TO_DEVICE);
 
 		/* clear tx_buffer data */
-		tx_buffer->skb = NULL;
 		dma_unmap_len_set(tx_buffer, len, 0);
 
 		/* unmap remaining buffers */
@@ -1576,6 +1553,10 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
 	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
 		  32;		/* PTHRESH = 32 */
 
+	/* reinitialize tx_buffer_info */
+	memset(ring->tx_buffer_info, 0,
+	       sizeof(struct ixgbevf_tx_buffer) * ring->count);
+
 	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
 
 	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
@@ -2184,23 +2165,57 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 **/
 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
 {
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	unsigned long size;
-	unsigned int i;
+	u16 i = tx_ring->next_to_clean;
+	struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
 
-	if (!tx_ring->tx_buffer_info)
-		return;
+	while (i != tx_ring->next_to_use) {
+		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
 
-	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tx_ring->count; i++) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+		/* Free all the Tx ring sk_buffs */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+
+		/* check for eop_desc to determine the end of the packet */
+		eop_desc = tx_buffer->next_to_watch;
+		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(i == tx_ring->count)) {
+				i = 0;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len))
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		i++;
+		if (unlikely(i == tx_ring->count)) {
+			i = 0;
+			tx_buffer = tx_ring->tx_buffer_info;
+		}
 	}
 
-	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-	memset(tx_ring->tx_buffer_info, 0, size);
-
-	memset(tx_ring->desc, 0, tx_ring->size);
+	/* reset next_to_use and next_to_clean */
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
 }
 
 /**
@@ -3030,7 +3045,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
 	int size;
 
 	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-	tx_ring->tx_buffer_info = vzalloc(size);
+	tx_ring->tx_buffer_info = vmalloc(size);
 	if (!tx_ring->tx_buffer_info)
 		goto err;
@@ -3634,18 +3649,32 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 	return;
 dma_error:
 	dev_err(tx_ring->dev, "TX DMA map failed\n");
+	tx_buffer = &tx_ring->tx_buffer_info[i];
 
 	/* clear dma mappings for failed tx_buffer_info map */
-	for (;;) {
+	while (tx_buffer != first) {
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_page(tx_ring->dev,
+				       dma_unmap_addr(tx_buffer, dma),
+				       dma_unmap_len(tx_buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		if (i-- == 0)
+			i += tx_ring->count;
 		tx_buffer = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
-		if (tx_buffer == first)
-			break;
-		if (i == 0)
-			i = tx_ring->count;
-		i--;
 	}
 
+	if (dma_unmap_len(tx_buffer, len))
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+	dma_unmap_len_set(tx_buffer, len, 0);
+
+	dev_kfree_skb_any(tx_buffer->skb);
+	tx_buffer->skb = NULL;
+
 	tx_ring->next_to_use = i;
 }