net/mlx5e: RX, verify received packet size in Linear Striding RQ
In the case of Striding RQ, we use MPWRQ (Multi-Packet WQE RQ), which
means that a WQE (RX descriptor) can be used for many packets, so the
WQE is much bigger than the MTU. In virtualization setups where the
port MTU can be larger than the VF MTU, a received packet bigger than
the VF MTU won't be dropped by the HW, because the receive WQE is
never too small for it. If we use a linear SKB in Striding RQ, where
each stride has room only for an MTU-sized payload plus SKB info, such
an oversized packet can cross the allocated page boundary upon the
call to build_skb and crash the kernel. The driver therefore needs to
check the packet size and drop oversized packets.
Introduce a new SW RX counter, rx_oversize_pkts_sw_drop, which counts
the number of packets dropped by the driver for being too large.
As a new field is added to the RQ struct, re-open the channels
whenever this field is used in the datapath (i.e., in the case of
linear Striding RQ).
Fixes: 619a8f2a42 ("net/mlx5e: Use linear SKB in Striding RQ")
Signed-off-by: Moshe Shemesh <moshe@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit 0073c8f727
parent 1392f44bba
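To see concretely why build_skb() can overrun the page, the stride
arithmetic can be sketched in userspace. The constants below (page
size, headroom, skb_shared_info size) are illustrative approximations,
and frag_sz() only mirrors, but is not, the kernel's MLX5_SKB_FRAG_SZ():

/* Standalone sketch of the stride-size arithmetic; all constants are
 * illustrative approximations, not the kernel's exact values. */
#include <stdio.h>

#define PAGE_SZ     4096u
#define ALIGN64(x)  (((x) + 63u) & ~63u)   /* stand-in for SKB_DATA_ALIGN */
#define SHINFO_SZ   320u                   /* ~sizeof(struct skb_shared_info) */
#define HEADROOM    256u                   /* typical rx headroom */

/* Mirrors MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt) */
static unsigned int frag_sz(unsigned int cqe_bcnt)
{
	return ALIGN64(HEADROOM + cqe_bcnt) + ALIGN64(SHINFO_SZ);
}

int main(void)
{
	/* An MTU-sized frame fits within its page stride ... */
	printf("1514-byte frame: frag %u, page %u\n", frag_sz(1514), PAGE_SZ);
	/* ... an oversized one makes build_skb() reach past the page. */
	printf("4000-byte frame: frag %u, page %u\n", frag_sz(4000), PAGE_SZ);
	return 0;
}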
@@ -569,6 +569,7 @@ struct mlx5e_rq {
 	unsigned long          state;
 	int                    ix;
+	unsigned int           hw_mtu;
 
 	struct net_dim         dim; /* Dynamic Interrupt Moderation */
 
@@ -502,6 +502,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	rq->channel = c;
 	rq->ix      = c->ix;
 	rq->mdev    = mdev;
+	rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	rq->stats   = &c->priv->channel_stats[c->ix].rq;
 
 	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -3766,10 +3767,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
 	}
 
 	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+		bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
 		u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
 		u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
 
-		reset = reset && (ppw_old != ppw_new);
+		reset = reset && (is_linear || (ppw_old != ppw_new));
 	}
 
 	if (!reset) {
@@ -1104,6 +1104,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	u32 frag_size;
 	bool consumed;
 
+	/* Check packet size. Note LRO doesn't use linear SKB */
+	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+		rq->stats->oversize_pkts_sw_drop++;
+		return NULL;
+	}
+
 	va             = page_address(di->page) + head_offset;
 	data           = va + rx_headroom;
 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
@@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -161,6 +162,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_wqe_err   += rq_stats->wqe_err;
 		s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes;
 		s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
+		s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
 		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
 		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
 		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -1189,6 +1191,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
@@ -96,6 +96,7 @@ struct mlx5e_sw_stats {
 	u64 rx_wqe_err;
 	u64 rx_mpwqe_filler_cqes;
 	u64 rx_mpwqe_filler_strides;
+	u64 rx_oversize_pkts_sw_drop;
 	u64 rx_buff_alloc_err;
 	u64 rx_cqe_compress_blks;
 	u64 rx_cqe_compress_pkts;
@@ -193,6 +194,7 @@ struct mlx5e_rq_stats {
 	u64 wqe_err;
 	u64 mpwqe_filler_cqes;
 	u64 mpwqe_filler_strides;
+	u64 oversize_pkts_sw_drop;
 	u64 buff_alloc_err;
 	u64 cqe_compress_blks;
 	u64 cqe_compress_pkts;
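The counter is plumbed like every other SW stat: each RQ bumps its
private oversize_pkts_sw_drop, and mlx5e_grp_sw_update_stats() folds
the per-ring values into the global rx_oversize_pkts_sw_drop that
ethtool -S reports (per-ring values appear as rx<N>_oversize_pkts_sw_drop).
A toy model of that fold, with struct names shortened and the ring
count hypothetical:

#include <stdint.h>
#include <stdio.h>

struct rq_stats_model { uint64_t oversize_pkts_sw_drop; };
struct sw_stats_model { uint64_t rx_oversize_pkts_sw_drop; };

int main(void)
{
	/* Four hypothetical rings, each with its own drop count. */
	struct rq_stats_model rings[4] = { {2}, {0}, {5}, {1} };
	struct sw_stats_model s = {0};

	/* Mirrors the accumulation in mlx5e_grp_sw_update_stats(). */
	for (int i = 0; i < 4; i++)
		s.rx_oversize_pkts_sw_drop += rings[i].oversize_pkts_sw_drop;

	printf("rx_oversize_pkts_sw_drop: %llu\n",
	       (unsigned long long)s.rx_oversize_pkts_sw_drop); /* 8 */
	return 0;
}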