xdp: introduce xdp_return_frame_rx_napi

When sending an xdp_frame through an xdp_do_redirect() call, error cases can occur where the xdp_frame needs to be dropped, and returning an -errno code isn't sufficient or possible any longer (e.g. in the cpumap case). This is already fully supported by simply calling xdp_return_frame(). This patch is an optimization: it provides xdp_return_frame_rx_napi(), a faster variant for these error cases. It takes advantage of XDP RX running under NAPI protection. The change is mostly relevant for drivers using the page_pool allocator, as they can benefit from it. (Tested with mlx5.)

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit: 389ab7f01a (parent: 9940fbf633)
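For illustration only (not part of this patch), a minimal sketch of the resulting calling convention; the example_* helpers are hypothetical:

#include <net/xdp.h>

/* Hypothetical helper: caller runs in XDP RX / NAPI (softirq) context,
 * so a page_pool-backed frame can be recycled directly into the pool's
 * lockless per-CPU cache.
 */
static void example_drop_in_napi(struct xdp_frame *xdpf)
{
	xdp_return_frame_rx_napi(xdpf);
}

/* Hypothetical helper: from any other context, the plain variant takes
 * the slower but context-safe return path.
 */
static void example_drop_elsewhere(struct xdp_frame *xdpf)
{
	xdp_return_frame(xdpf);
}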
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -115,13 +115,14 @@ void page_pool_destroy(struct page_pool *pool);
 void __page_pool_put_page(struct page_pool *pool,
 			  struct page *page, bool allow_direct);
 
-static inline void page_pool_put_page(struct page_pool *pool, struct page *page)
+static inline void page_pool_put_page(struct page_pool *pool,
+				      struct page *page, bool allow_direct)
 {
 	/* When page_pool isn't compiled-in, net/core/xdp.c doesn't
 	 * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
 	 */
 #ifdef CONFIG_PAGE_POOL
-	__page_pool_put_page(pool, page, false);
+	__page_pool_put_page(pool, page, allow_direct);
 #endif
 }
 /* Very limited use-cases allow recycle direct */
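With this change, page_pool_put_page() callers state explicitly whether they hold NAPI protection. A hedged sketch of a caller; in_napi is a hypothetical flag the caller would have to track:

#include <net/page_pool.h>

/* Passing true for the last argument is only safe when the caller is
 * serialized by NAPI on the CPU the pool is bound to.
 */
static void example_release_page(struct page_pool *pool, struct page *page,
				 bool in_napi)
{
	page_pool_put_page(pool, page, in_napi);
}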
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -104,6 +104,7 @@ struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
 }
 
 void xdp_return_frame(struct xdp_frame *xdpf);
+void xdp_return_frame_rx_napi(struct xdp_frame *xdpf);
 void xdp_return_buff(struct xdp_buff *xdp);
 
 int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -578,7 +578,7 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
 		err = __ptr_ring_produce(q, xdpf);
 		if (err) {
 			drops++;
-			xdp_return_frame(xdpf);
+			xdp_return_frame_rx_napi(xdpf);
 		}
 		processed++;
 	}
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -239,7 +239,7 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
 		err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
 		if (err) {
 			drops++;
-			xdp_return_frame(xdpf);
+			xdp_return_frame_rx_napi(xdpf);
 		} else {
 			sent++;
 		}
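Note that both call sites above sit in the bulk-queue flush paths, which run from xdp_do_flush_map() at the end of the NAPI poll cycle, i.e. still under RX softirq/NAPI protection; that is what makes the _rx_napi variant safe here.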
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -308,7 +308,13 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
 }
 EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
 
-static void xdp_return(void *data, struct xdp_mem_info *mem)
+/* XDP RX runs under NAPI protection, and in different delivery error
+ * scenarios (e.g. queue full), it is possible to return the xdp_frame
+ * while still leveraging this protection. The @napi_direct boolean
+ * is used for those call sites, allowing for faster recycling
+ * of xdp_frames/pages in those cases.
+ */
+static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 {
 	struct xdp_mem_allocator *xa;
 	struct page *page;
@@ -320,7 +326,7 @@ static void xdp_return(void *data, struct xdp_mem_info *mem)
 		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
 		page = virt_to_head_page(data);
 		if (xa)
-			page_pool_put_page(xa->page_pool, page);
+			page_pool_put_page(xa->page_pool, page, napi_direct);
 		else
 			put_page(page);
 		rcu_read_unlock();
@@ -340,12 +346,18 @@ static void xdp_return(void *data, struct xdp_mem_info *mem)
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-	xdp_return(xdpf->data, &xdpf->mem);
+	__xdp_return(xdpf->data, &xdpf->mem, false);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
 
+void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
+{
+	__xdp_return(xdpf->data, &xdpf->mem, true);
+}
+EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
+
 void xdp_return_buff(struct xdp_buff *xdp)
 {
-	xdp_return(xdp->data, &xdp->rxq->mem);
+	__xdp_return(xdp->data, &xdp->rxq->mem, true);
 }
 EXPORT_SYMBOL_GPL(xdp_return_buff);
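For context, a simplified paraphrase of what the allow_direct flag enables inside page_pool; the sketch_* helpers are hypothetical stand-ins, not the upstream implementation:

#include <net/page_pool.h>

/* Hypothetical predicates standing in for page_pool internals. */
static bool sketch_recycle_into_alloc_cache(struct page_pool *pool,
					    struct page *page);
static bool sketch_recycle_into_ptr_ring(struct page_pool *pool,
					 struct page *page);

static void sketch__page_pool_put_page(struct page_pool *pool,
				       struct page *page, bool allow_direct)
{
	/* Fast path: a NAPI-protected caller may refill the lockless
	 * per-CPU alloc cache directly, with no atomic operations.
	 */
	if (allow_direct && sketch_recycle_into_alloc_cache(pool, page))
		return;

	/* Otherwise hand the page back through the pool's ptr_ring. */
	if (sketch_recycle_into_ptr_ring(pool, page))
		return;

	/* Pool full: release the page back to the page allocator. */
	put_page(page);
}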