ixgbevf: update code to better handle incrementing page count
Based on commit bd4171a5d4
("igb: update code to better handle incrementing page count")
Update the driver code so that we do bulk updates of the page reference
count instead of just incrementing it by one reference at a time. The
advantage to doing this is that we cut down on atomic operations and
this in turn should give us a slight improvement in cycles per packet.
In addition, if we eventually move this over to using build_skb, the gains
will be more noticeable.
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
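
For readers unfamiliar with the pagecnt_bias trick this commit introduces, the sketch below models the accounting in plain user-space C. It is only an illustration of the idea described above, not the driver code: struct fake_page, rx_buf, process_frag_can_reuse() and the other names are invented for this example, and a sequential loop stands in for the real RX path. The point is that the per-packet hot path only touches a plain per-buffer counter, while the real atomic refcount is adjusted in bulk (page_ref_add(USHRT_MAX)-style) and drained once at teardown.

/* Minimal user-space sketch of the bulk refcount ("pagecnt_bias") idea.
 * All names here are invented for illustration; this is not kernel code.
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	atomic_uint refcount;          /* stands in for the page's atomic refcount */
};

struct rx_buf {
	struct fake_page *page;
	unsigned short pagecnt_bias;   /* references pre-charged to the driver */
};

/* Allocation: one reference on the page, owned entirely by the driver. */
static void alloc_rx_page(struct rx_buf *buf, struct fake_page *page)
{
	atomic_store(&page->refcount, 1);
	buf->page = page;
	buf->pagecnt_bias = 1;
}

/* Per packet: hand a fragment to the consumer and decide whether the page
 * can be reused.  Giving away a fragment transfers one of the driver's
 * pre-charged references, so only the plain counter is decremented here.
 */
static bool process_frag_can_reuse(struct rx_buf *buf)
{
	unsigned int pagecnt_bias = buf->pagecnt_bias--;

	/* Reuse only if every live reference is one the driver pre-charged. */
	if (atomic_load(&buf->page->refcount) != pagecnt_bias)
		return false;

	/* Pre-charged references nearly exhausted: restock them with a
	 * single bulk atomic add instead of one increment per packet.
	 */
	if (pagecnt_bias == 1) {
		atomic_fetch_add(&buf->page->refcount, USHRT_MAX);
		buf->pagecnt_bias = USHRT_MAX;
	}
	return true;
}

/* The consumer eventually drops its reference (what put_page() would do). */
static void consumer_put(struct fake_page *page)
{
	atomic_fetch_sub(&page->refcount, 1);
}

/* Teardown: return all of the driver's remaining references at once,
 * in the spirit of __page_frag_cache_drain().
 */
static void drain_rx_page(struct rx_buf *buf)
{
	atomic_fetch_sub(&buf->page->refcount, buf->pagecnt_bias);
	buf->page = NULL;
}

int main(void)
{
	struct fake_page page;
	struct rx_buf buf;

	alloc_rx_page(&buf, &page);

	for (int i = 0; i < 5; i++) {
		if (!process_frag_can_reuse(&buf))
			break;               /* page would be replaced here */
		consumer_put(&page);         /* consumer returns its reference */
	}

	printf("before drain: refcount=%u bias=%u\n",
	       atomic_load(&page.refcount), (unsigned int)buf.pagecnt_bias);
	drain_rx_page(&buf);
	printf("after drain:  refcount=%u\n", atomic_load(&page.refcount));
	return 0;
}

In this model the consumer returning its reference is the only atomic operation on the normal path; the driver's own bookkeeping stays in pagecnt_bias, which is exactly the saving the commit message describes.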
parent 16b359498b
commit 35074d698d
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -62,7 +62,12 @@ struct ixgbevf_tx_buffer {
 struct ixgbevf_rx_buffer {
         dma_addr_t dma;
         struct page *page;
-        unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+        __u32 page_offset;
+#else
+        __u16 page_offset;
+#endif
+        __u16 pagecnt_bias;
 };
 
 struct ixgbevf_stats {
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -611,6 +611,7 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
         bi->dma = dma;
         bi->page = page;
         bi->page_offset = 0;
+        bi->pagecnt_bias = 1;
 
         return true;
 }
@@ -747,6 +748,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
         new_buff->page = old_buff->page;
         new_buff->dma = old_buff->dma;
         new_buff->page_offset = old_buff->page_offset;
+        new_buff->pagecnt_bias = old_buff->pagecnt_bias;
 }
 
 static inline bool ixgbevf_page_is_reserved(struct page *page)
@@ -758,13 +760,15 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
                                       struct page *page,
                                       const unsigned int truesize)
 {
+        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
         /* avoid re-using remote pages */
         if (unlikely(ixgbevf_page_is_reserved(page)))
                 return false;
 
 #if (PAGE_SIZE < 8192)
         /* if we are only owner of page we can reuse it */
-        if (unlikely(page_count(page) != 1))
+        if (unlikely(page_ref_count(page) != pagecnt_bias))
                 return false;
 
         /* flip page offset to other buffer */
@@ -778,10 +782,15 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
                 return false;
 
 #endif
-        /* Even if we own the page, we are not allowed to use atomic_set()
-         * This would break get_page_unless_zero() users.
+
+        /* If we have drained the page fragment pool we need to update
+         * the pagecnt_bias and page count so that we fully restock the
+         * number of references the driver holds.
          */
-        page_ref_inc(page);
+        if (unlikely(pagecnt_bias == 1)) {
+                page_ref_add(page, USHRT_MAX);
+                rx_buffer->pagecnt_bias = USHRT_MAX;
+        }
 
         return true;
 }
@@ -827,7 +836,6 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
                 return true;
 
         /* this page cannot be reused so discard it */
-        put_page(page);
         return false;
 }
 
@@ -899,10 +907,13 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
                 /* hand second half of page back to the ring */
                 ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
         } else {
-                /* we are not reusing the buffer so unmap it */
+                /* We are not reusing the buffer so unmap it and free
+                 * any references we are holding to it
+                 */
                 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                                      PAGE_SIZE, DMA_FROM_DEVICE,
                                      IXGBEVF_RX_DMA_ATTR);
+                __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
         }
 
         /* clear contents of buffer_info */
@@ -2135,6 +2146,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
                 struct ixgbevf_rx_buffer *rx_buffer;
 
                 rx_buffer = &rx_ring->rx_buffer_info[i];
+                if (!rx_buffer->page)
+                        continue;
 
                 /* Invalidate cache lines that may have been written to by
                  * device so that we avoid corrupting memory.
@@ -2152,8 +2165,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
                                      DMA_FROM_DEVICE,
                                      IXGBEVF_RX_DMA_ATTR);
-                if (rx_buffer->page)
-                        __free_page(rx_buffer->page);
+
+                __page_frag_cache_drain(rx_buffer->page,
+                                        rx_buffer->pagecnt_bias);
 
                 rx_buffer->page = NULL;
         }