mirror of https://gitee.com/openkylin/linux.git
net/mlx5e: XDP, Maintain a FIFO structure for xdp_info instances
This provides infrastructure to have multiple xdp_info instances for the same consumer index.

Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent b8180392ed
commit fea28dd6a2
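The hunks below replace the per-slot sq->db.xdpi[] array with a small FIFO driven by free-running producer/consumer counters, so several mlx5e_xdp_info entries can be outstanding for a single consumer index. The following standalone C sketch mirrors that discipline; the struct xdp_info stand-in, the fixed DEPTH, and the main() driver are illustrative only and are not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define DEPTH 8                                 /* must be a power of two */

struct xdp_info { uint64_t dma_addr; };         /* stand-in for mlx5e_xdp_info */

struct xdpi_fifo {
        struct xdp_info *xi;                    /* backing array, DEPTH entries   */
        uint32_t *cc;                           /* consumer counter, free-running */
        uint32_t *pc;                           /* producer counter, free-running */
        uint32_t mask;                          /* DEPTH - 1                      */
};

static void fifo_push(struct xdpi_fifo *fifo, struct xdp_info *xi)
{
        uint32_t i = (*fifo->pc)++ & fifo->mask; /* wrap the slot index via the mask */

        fifo->xi[i] = *xi;
}

static struct xdp_info fifo_pop(struct xdpi_fifo *fifo)
{
        return fifo->xi[(*fifo->cc)++ & fifo->mask];
}

int main(void)
{
        struct xdp_info entries[DEPTH];
        uint32_t cc = 0, pc = 0;
        struct xdpi_fifo fifo = { entries, &cc, &pc, DEPTH - 1 };

        /* push more than one entry before any completion is processed */
        for (uint64_t n = 1; n <= 3; n++)
                fifo_push(&fifo, &(struct xdp_info){ .dma_addr = n });

        while (cc != pc)                        /* drain, as the CQ path does */
                printf("popped dma_addr %llu\n",
                       (unsigned long long)fifo_pop(&fifo).dma_addr);
        return 0;
}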
@@ -404,15 +404,24 @@ struct mlx5e_xdp_info {
         struct mlx5e_dma_info di;
 };
 
+struct mlx5e_xdp_info_fifo {
+        struct mlx5e_xdp_info *xi;
+        u32 *cc;
+        u32 *pc;
+        u32 mask;
+};
+
 struct mlx5e_xdpsq {
         /* data path */
 
         /* dirtied @completion */
+        u32 xdpi_fifo_cc;
         u16 cc;
         bool redirect_flush;
 
         /* dirtied @xmit */
-        u16 pc ____cacheline_aligned_in_smp;
+        u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
+        u16 pc;
         struct mlx5_wqe_ctrl_seg *doorbell_cseg;
 
         struct mlx5e_cq cq;
@@ -421,7 +430,7 @@ struct mlx5e_xdpsq
         struct mlx5_wq_cyc wq;
         struct mlx5e_xdpsq_stats *stats;
         struct {
-                struct mlx5e_xdp_info *xdpi;
+                struct mlx5e_xdp_info_fifo xdpi_fifo;
         } db;
         void __iomem *uar_map;
         u32 sqn;
@@ -149,20 +149,18 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
 
         cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
 
-        /* move page to reference to sq responsibility,
-         * and mark so it's not put back in page-cache.
-         */
-        sq->db.xdpi[pi] = *xdpi;
         sq->pc++;
 
         sq->doorbell_cseg = cseg;
 
+        mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
         stats->xmit++;
         return true;
 }
 
 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
 {
+        struct mlx5e_xdp_info_fifo *xdpi_fifo;
         struct mlx5e_xdpsq *sq;
         struct mlx5_cqe64 *cqe;
         bool is_redirect;
@@ -179,6 +177,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
                 return false;
 
         is_redirect = !rq;
+        xdpi_fifo = &sq->db.xdpi_fifo;
 
         /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
          * otherwise a cq overrun may occur
@@ -200,19 +199,19 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
                                   get_cqe_opcode(cqe));
 
                 do {
-                        u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
-                        struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+                        struct mlx5e_xdp_info xdpi =
+                                mlx5e_xdpi_fifo_pop(xdpi_fifo);
 
                         last_wqe = (sqcc == wqe_counter);
                         sqcc++;
 
                         if (is_redirect) {
-                                xdp_return_frame(xdpi->xdpf);
-                                dma_unmap_single(sq->pdev, xdpi->dma_addr,
-                                                 xdpi->xdpf->len, DMA_TO_DEVICE);
+                                xdp_return_frame(xdpi.xdpf);
+                                dma_unmap_single(sq->pdev, xdpi.dma_addr,
+                                                 xdpi.xdpf->len, DMA_TO_DEVICE);
                         } else {
                                 /* Recycle RX page */
-                                mlx5e_page_release(rq, &xdpi->di, true);
+                                mlx5e_page_release(rq, &xdpi.di, true);
                         }
                 } while (!last_wqe);
         } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
@@ -230,21 +229,22 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
 
 void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
 {
+        struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
         bool is_redirect = !rq;
 
         while (sq->cc != sq->pc) {
-                u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
-                struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
+                struct mlx5e_xdp_info xdpi =
+                        mlx5e_xdpi_fifo_pop(xdpi_fifo);
 
                 sq->cc++;
 
                 if (is_redirect) {
-                        xdp_return_frame(xdpi->xdpf);
-                        dma_unmap_single(sq->pdev, xdpi->dma_addr,
-                                         xdpi->xdpf->len, DMA_TO_DEVICE);
+                        xdp_return_frame(xdpi.xdpf);
+                        dma_unmap_single(sq->pdev, xdpi.dma_addr,
+                                         xdpi.xdpf->len, DMA_TO_DEVICE);
                 } else {
                         /* Recycle RX page */
-                        mlx5e_page_release(rq, &xdpi->di, false);
+                        mlx5e_page_release(rq, &xdpi.di, false);
                 }
         }
 }
@@ -57,4 +57,19 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
         }
 }
 
+static inline void
+mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
+                     struct mlx5e_xdp_info *xi)
+{
+        u32 i = (*fifo->pc)++ & fifo->mask;
+
+        fifo->xi[i] = *xi;
+}
+
+static inline struct mlx5e_xdp_info
+mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
+{
+        return fifo->xi[(*fifo->cc)++ & fifo->mask];
+}
+
 #endif
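The push/pop helpers added above perform no emptiness or fullness checks: the caller guarantees at most mask + 1 entries are in flight, and the correctness of the `& fifo->mask` indexing relies on the depth being a power of two and on the counters being free-running unsigned values. A small hedged check of that counter arithmetic (names and values here are illustrative, not taken from the driver):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t mask = 8 - 1;                  /* power-of-two depth of 8 */
        uint32_t cc = UINT32_MAX - 1;           /* counters close to 32-bit wrap */
        uint32_t pc = UINT32_MAX - 1;

        pc += 3;                                /* three pushes; pc wraps past zero */

        assert((uint32_t)(pc - cc) == 3);       /* occupancy survives the wrap   */
        assert(((cc + 0) & mask) != ((cc + 1) & mask)); /* slots keep advancing  */
        assert(((cc + 3) & mask) == (pc & mask));       /* and meet the producer */
        return 0;
}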
@@ -992,18 +992,35 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
 
 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
 {
-        kvfree(sq->db.xdpi);
+        kvfree(sq->db.xdpi_fifo.xi);
+}
+
+static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
+{
+        struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
+        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+        int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+        xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
+                                      GFP_KERNEL, numa);
+        if (!xdpi_fifo->xi)
+                return -ENOMEM;
+
+        xdpi_fifo->pc = &sq->xdpi_fifo_pc;
+        xdpi_fifo->cc = &sq->xdpi_fifo_cc;
+        xdpi_fifo->mask = dsegs_per_wq - 1;
+
+        return 0;
 }
 
 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
 {
-        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+        int err;
 
-        sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
-                                    GFP_KERNEL, numa);
-        if (!sq->db.xdpi) {
+        err = mlx5e_alloc_xdpsq_fifo(sq, numa);
+        if (err) {
                 mlx5e_free_xdpsq_db(sq);
-                return -ENOMEM;
+                return err;
         }
 
         return 0;
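mlx5e_alloc_xdpsq_fifo() above sizes the FIFO as wq_sz * MLX5_SEND_WQEBB_NUM_DS entries rather than wq_sz, leaving room for more than one xdp_info per send-queue slot, and derives the mask as dsegs_per_wq - 1. That mask is only valid if dsegs_per_wq is a power of two; the brief illustrative check below assumes that both the ring size and the per-WQEBB descriptor count are powers of two (the concrete values are examples, not taken from the driver).

#include <assert.h>

int main(void)
{
        int wq_sz = 1024;                  /* example power-of-two ring size     */
        int ds_per_wqebb = 4;              /* example per-WQEBB descriptor count */
        int dsegs_per_wq = wq_sz * ds_per_wqebb;
        int mask = dsegs_per_wq - 1;

        /* a power of two has exactly one bit set, so value & (value - 1) == 0 */
        assert((dsegs_per_wq & mask) == 0);
        /* masking a counter is then equivalent to taking it modulo the depth */
        assert((4096 + 5) % dsegs_per_wq == ((4096 + 5) & mask));
        return 0;
}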