net/mlx5e: Extend the stats group API to have update_stats()
Extend the stats group API to have an update_stats() callback which will be used to fetch the hardware or software counters data. Signed-off-by: Kamal Heib <kamalh@mellanox.com> Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
This commit is contained in:
parent
a89842811e
commit
1938617735
|
@@ -841,7 +841,7 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
|
|||
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
|
||||
void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
|
||||
|
||||
void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
|
||||
void mlx5e_update_stats(struct mlx5e_priv *priv);
|
||||
|
||||
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
|
||||
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
|
||||
|
|
|
@@ -207,7 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
|
|||
return;
|
||||
|
||||
mutex_lock(&priv->state_lock);
|
||||
mlx5e_update_stats(priv, true);
|
||||
mlx5e_update_stats(priv);
|
||||
mutex_unlock(&priv->state_lock);
|
||||
|
||||
for (i = 0; i < mlx5e_num_stats_grps; i++)
|
||||
|
|
|
@@ -173,185 +173,23 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
|
|||
rtnl_unlock();
|
||||
}
|
||||
|
||||
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
|
||||
void mlx5e_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_sw_stats temp, *s = &temp;
|
||||
struct mlx5e_rq_stats *rq_stats;
|
||||
struct mlx5e_sq_stats *sq_stats;
|
||||
struct mlx5e_ch_stats *ch_stats;
|
||||
int i, j;
|
||||
int i;
|
||||
|
||||
memset(s, 0, sizeof(*s));
|
||||
for (i = 0; i < priv->channels.num; i++) {
|
||||
struct mlx5e_channel *c = priv->channels.c[i];
|
||||
|
||||
rq_stats = &c->rq.stats;
|
||||
ch_stats = &c->stats;
|
||||
|
||||
s->rx_packets += rq_stats->packets;
|
||||
s->rx_bytes += rq_stats->bytes;
|
||||
s->rx_lro_packets += rq_stats->lro_packets;
|
||||
s->rx_lro_bytes += rq_stats->lro_bytes;
|
||||
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
|
||||
s->rx_csum_none += rq_stats->csum_none;
|
||||
s->rx_csum_complete += rq_stats->csum_complete;
|
||||
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
|
||||
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
|
||||
s->rx_xdp_drop += rq_stats->xdp_drop;
|
||||
s->rx_xdp_tx += rq_stats->xdp_tx;
|
||||
s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
|
||||
s->rx_wqe_err += rq_stats->wqe_err;
|
||||
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
|
||||
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
|
||||
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
|
||||
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
|
||||
s->rx_page_reuse += rq_stats->page_reuse;
|
||||
s->rx_cache_reuse += rq_stats->cache_reuse;
|
||||
s->rx_cache_full += rq_stats->cache_full;
|
||||
s->rx_cache_empty += rq_stats->cache_empty;
|
||||
s->rx_cache_busy += rq_stats->cache_busy;
|
||||
s->rx_cache_waive += rq_stats->cache_waive;
|
||||
s->ch_eq_rearm += ch_stats->eq_rearm;
|
||||
|
||||
for (j = 0; j < priv->channels.params.num_tc; j++) {
|
||||
sq_stats = &c->sq[j].stats;
|
||||
|
||||
s->tx_packets += sq_stats->packets;
|
||||
s->tx_bytes += sq_stats->bytes;
|
||||
s->tx_tso_packets += sq_stats->tso_packets;
|
||||
s->tx_tso_bytes += sq_stats->tso_bytes;
|
||||
s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
|
||||
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
|
||||
s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
|
||||
s->tx_queue_stopped += sq_stats->stopped;
|
||||
s->tx_queue_wake += sq_stats->wake;
|
||||
s->tx_queue_dropped += sq_stats->dropped;
|
||||
s->tx_xmit_more += sq_stats->xmit_more;
|
||||
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
|
||||
s->tx_csum_none += sq_stats->csum_none;
|
||||
s->tx_csum_partial += sq_stats->csum_partial;
|
||||
}
|
||||
}
|
||||
|
||||
s->link_down_events_phy = MLX5_GET(ppcnt_reg,
|
||||
priv->stats.pport.phy_counters,
|
||||
counter_set.phys_layer_cntrs.link_down_events);
|
||||
memcpy(&priv->stats.sw, s, sizeof(*s));
|
||||
}
|
||||
|
||||
static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
|
||||
{
|
||||
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
|
||||
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
|
||||
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
|
||||
MLX5_SET(query_vport_counter_in, in, opcode,
|
||||
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
|
||||
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
|
||||
MLX5_SET(query_vport_counter_in, in, other_vport, 0);
|
||||
|
||||
mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
|
||||
}
|
||||
|
||||
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
|
||||
{
|
||||
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
|
||||
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
|
||||
int prio;
|
||||
void *out;
|
||||
|
||||
MLX5_SET(ppcnt_reg, in, local_port, 1);
|
||||
|
||||
out = pstats->IEEE_802_3_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
|
||||
if (!full)
|
||||
return;
|
||||
|
||||
out = pstats->RFC_2863_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
|
||||
out = pstats->RFC_2819_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
|
||||
out = pstats->phy_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
|
||||
if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
|
||||
out = pstats->phy_statistical_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
}
|
||||
|
||||
if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
|
||||
out = pstats->eth_ext_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
}
|
||||
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
|
||||
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
|
||||
out = pstats->per_prio_counters[prio];
|
||||
MLX5_SET(ppcnt_reg, in, prio_tc, prio);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz,
|
||||
MLX5_REG_PPCNT, 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
|
||||
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
|
||||
int err;
|
||||
|
||||
if (!priv->q_counter)
|
||||
return;
|
||||
|
||||
err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
|
||||
if (err)
|
||||
return;
|
||||
|
||||
qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
|
||||
}
|
||||
|
||||
static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
|
||||
int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
|
||||
void *out;
|
||||
|
||||
if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
|
||||
return;
|
||||
|
||||
out = pcie_stats->pcie_perf_counters;
|
||||
MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
|
||||
}
|
||||
|
||||
void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
|
||||
{
|
||||
if (full) {
|
||||
mlx5e_update_pcie_counters(priv);
|
||||
mlx5e_ipsec_update_stats(priv);
|
||||
}
|
||||
mlx5e_update_pport_counters(priv, full);
|
||||
mlx5e_update_vport_counters(priv);
|
||||
mlx5e_update_q_counter(priv);
|
||||
mlx5e_update_sw_counters(priv);
|
||||
for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
|
||||
if (mlx5e_stats_grps[i].update_stats)
|
||||
mlx5e_stats_grps[i].update_stats(priv);
|
||||
}
|
||||
|
||||
static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
mlx5e_update_stats(priv, false);
|
||||
int i;
|
||||
|
||||
for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
|
||||
if (mlx5e_stats_grps[i].update_stats_mask &
|
||||
MLX5E_NDO_UPDATE_STATS)
|
||||
mlx5e_stats_grps[i].update_stats(priv);
|
||||
}
|
||||
|
||||
void mlx5e_update_stats_work(struct work_struct *work)
|
||||
|
|
|
@@ -100,6 +100,72 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
|
|||
return idx;
|
||||
}
|
||||
|
||||
static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_sw_stats temp, *s = &temp;
|
||||
struct mlx5e_rq_stats *rq_stats;
|
||||
struct mlx5e_sq_stats *sq_stats;
|
||||
struct mlx5e_ch_stats *ch_stats;
|
||||
int i, j;
|
||||
|
||||
memset(s, 0, sizeof(*s));
|
||||
for (i = 0; i < priv->channels.num; i++) {
|
||||
struct mlx5e_channel *c = priv->channels.c[i];
|
||||
|
||||
rq_stats = &c->rq.stats;
|
||||
ch_stats = &c->stats;
|
||||
|
||||
s->rx_packets += rq_stats->packets;
|
||||
s->rx_bytes += rq_stats->bytes;
|
||||
s->rx_lro_packets += rq_stats->lro_packets;
|
||||
s->rx_lro_bytes += rq_stats->lro_bytes;
|
||||
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
|
||||
s->rx_csum_none += rq_stats->csum_none;
|
||||
s->rx_csum_complete += rq_stats->csum_complete;
|
||||
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
|
||||
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
|
||||
s->rx_xdp_drop += rq_stats->xdp_drop;
|
||||
s->rx_xdp_tx += rq_stats->xdp_tx;
|
||||
s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
|
||||
s->rx_wqe_err += rq_stats->wqe_err;
|
||||
s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
|
||||
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
|
||||
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
|
||||
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
|
||||
s->rx_page_reuse += rq_stats->page_reuse;
|
||||
s->rx_cache_reuse += rq_stats->cache_reuse;
|
||||
s->rx_cache_full += rq_stats->cache_full;
|
||||
s->rx_cache_empty += rq_stats->cache_empty;
|
||||
s->rx_cache_busy += rq_stats->cache_busy;
|
||||
s->rx_cache_waive += rq_stats->cache_waive;
|
||||
s->ch_eq_rearm += ch_stats->eq_rearm;
|
||||
|
||||
for (j = 0; j < priv->channels.params.num_tc; j++) {
|
||||
sq_stats = &c->sq[j].stats;
|
||||
|
||||
s->tx_packets += sq_stats->packets;
|
||||
s->tx_bytes += sq_stats->bytes;
|
||||
s->tx_tso_packets += sq_stats->tso_packets;
|
||||
s->tx_tso_bytes += sq_stats->tso_bytes;
|
||||
s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
|
||||
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
|
||||
s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
|
||||
s->tx_queue_stopped += sq_stats->stopped;
|
||||
s->tx_queue_wake += sq_stats->wake;
|
||||
s->tx_queue_dropped += sq_stats->dropped;
|
||||
s->tx_xmit_more += sq_stats->xmit_more;
|
||||
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
|
||||
s->tx_csum_none += sq_stats->csum_none;
|
||||
s->tx_csum_partial += sq_stats->csum_partial;
|
||||
}
|
||||
}
|
||||
|
||||
s->link_down_events_phy = MLX5_GET(ppcnt_reg,
|
||||
priv->stats.pport.phy_counters,
|
||||
counter_set.phys_layer_cntrs.link_down_events);
|
||||
memcpy(&priv->stats.sw, s, sizeof(*s));
|
||||
}
|
||||
|
||||
static const struct counter_desc q_stats_desc[] = {
|
||||
{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
|
||||
};
|
||||
|
@@ -129,6 +195,22 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
|
|||
return idx;
|
||||
}
|
||||
|
||||
static void mlx5e_grp_q_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
|
||||
u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
|
||||
int err;
|
||||
|
||||
if (!priv->q_counter)
|
||||
return;
|
||||
|
||||
err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
|
||||
if (err)
|
||||
return;
|
||||
|
||||
qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
|
||||
}
|
||||
|
||||
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
|
||||
static const struct counter_desc vport_stats_desc[] = {
|
||||
{ "rx_vport_unicast_packets",
|
||||
|
@@ -201,6 +283,19 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
|
|||
return idx;
|
||||
}
|
||||
|
||||
static void mlx5e_grp_vport_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
|
||||
u32 *out = (u32 *)priv->stats.vport.query_vport_out;
|
||||
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
|
||||
MLX5_SET(query_vport_counter_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_COUNTER);
|
||||
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
|
||||
MLX5_SET(query_vport_counter_in, in, other_vport, 0);
|
||||
mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
|
||||
}
|
||||
|
||||
#define PPORT_802_3_OFF(c) \
|
||||
MLX5_BYTE_OFF(ppcnt_reg, \
|
||||
counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
|
||||
|
@@ -253,6 +348,20 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
|
|||
return idx;
|
||||
}
|
||||
|
||||
static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
|
||||
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
|
||||
void *out;
|
||||
|
||||
MLX5_SET(ppcnt_reg, in, local_port, 1);
|
||||
out = pstats->IEEE_802_3_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
}
|
||||
|
||||
#define PPORT_2863_OFF(c) \
|
||||
MLX5_BYTE_OFF(ppcnt_reg, \
|
||||
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
|
||||
|
@@ -290,6 +399,20 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
|
|||
return idx;
|
||||
}
|
||||
|
||||
static void mlx5e_grp_2863_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
|
||||
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
|
||||
void *out;
|
||||
|
||||
MLX5_SET(ppcnt_reg, in, local_port, 1);
|
||||
out = pstats->RFC_2863_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
}
|
||||
|
||||
#define PPORT_2819_OFF(c) \
|
||||
MLX5_BYTE_OFF(ppcnt_reg, \
|
||||
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
|
||||
|
@@ -337,6 +460,20 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
|
|||
return idx;
|
||||
}
|
||||
|
||||
static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
|
||||
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
|
||||
void *out;
|
||||
|
||||
MLX5_SET(ppcnt_reg, in, local_port, 1);
|
||||
out = pstats->RFC_2819_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
}
|
||||
|
||||
#define PPORT_PHY_STATISTICAL_OFF(c) \
|
||||
MLX5_BYTE_OFF(ppcnt_reg, \
|
||||
counter_set.phys_layer_statistical_cntrs.c##_high)
|
||||
|
@@ -377,6 +514,27 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
|
|||
return idx;
|
||||
}
|
||||
|
||||
static void mlx5e_grp_phy_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
|
||||
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
|
||||
void *out;
|
||||
|
||||
MLX5_SET(ppcnt_reg, in, local_port, 1);
|
||||
out = pstats->phy_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
|
||||
if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
|
||||
return;
|
||||
|
||||
out = pstats->phy_statistical_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
}
|
||||
|
||||
#define PPORT_ETH_EXT_OFF(c) \
|
||||
MLX5_BYTE_OFF(ppcnt_reg, \
|
||||
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
|
||||
|
@@ -419,6 +577,23 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
|
|||
return idx;
|
||||
}
|
||||
|
||||
static void mlx5e_grp_eth_ext_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
|
||||
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
|
||||
void *out;
|
||||
|
||||
if (!MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters))
|
||||
return;
|
||||
|
||||
MLX5_SET(ppcnt_reg, in, local_port, 1);
|
||||
out = pstats->eth_ext_counters;
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
|
||||
}
|
||||
|
||||
#define PCIE_PERF_OFF(c) \
|
||||
MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
|
||||
static const struct counter_desc pcie_perf_stats_desc[] = {
|
||||
|
@@ -506,6 +681,22 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
|
|||
return idx;
|
||||
}
|
||||
|
||||
static void mlx5e_grp_pcie_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
|
||||
int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
|
||||
void *out;
|
||||
|
||||
if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
|
||||
return;
|
||||
|
||||
out = pcie_stats->pcie_perf_counters;
|
||||
MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
|
||||
}
|
||||
|
||||
#define PPORT_PER_PRIO_OFF(c) \
|
||||
MLX5_BYTE_OFF(ppcnt_reg, \
|
||||
counter_set.eth_per_prio_grp_data_layout.c##_high)
|
||||
|
@@ -679,6 +870,25 @@ static int mlx5e_grp_per_prio_fill_stats(struct mlx5e_priv *priv, u64 *data,
|
|||
return idx;
|
||||
}
|
||||
|
||||
static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
|
||||
struct mlx5_core_dev *mdev = priv->mdev;
|
||||
u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
|
||||
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
|
||||
int prio;
|
||||
void *out;
|
||||
|
||||
MLX5_SET(ppcnt_reg, in, local_port, 1);
|
||||
MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
|
||||
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
|
||||
out = pstats->per_prio_counters[prio];
|
||||
MLX5_SET(ppcnt_reg, in, prio_tc, prio);
|
||||
mlx5_core_access_reg(mdev, in, sz, out, sz,
|
||||
MLX5_REG_PPCNT, 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
static const struct counter_desc mlx5e_pme_status_desc[] = {
|
||||
{ "module_unplug", 8 },
|
||||
};
|
||||
|
@@ -746,6 +956,11 @@ static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
|
|||
return idx + mlx5e_ipsec_get_stats(priv, data + idx);
|
||||
}
|
||||
|
||||
static void mlx5e_grp_ipsec_update_stats(struct mlx5e_priv *priv)
|
||||
{
|
||||
mlx5e_ipsec_update_stats(priv);
|
||||
}
|
||||
|
||||
static const struct counter_desc rq_stats_desc[] = {
|
||||
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
|
||||
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
|
||||
|
@@ -863,56 +1078,71 @@ static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
|
|||
return idx;
|
||||
}
|
||||
|
||||
/* The stats groups order is opposite to the update_stats() order calls */
|
||||
const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_sw_get_num_stats,
|
||||
.fill_strings = mlx5e_grp_sw_fill_strings,
|
||||
.fill_stats = mlx5e_grp_sw_fill_stats,
|
||||
.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
|
||||
.update_stats = mlx5e_grp_sw_update_stats,
|
||||
},
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_q_get_num_stats,
|
||||
.fill_strings = mlx5e_grp_q_fill_strings,
|
||||
.fill_stats = mlx5e_grp_q_fill_stats,
|
||||
.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
|
||||
.update_stats = mlx5e_grp_q_update_stats,
|
||||
},
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_vport_get_num_stats,
|
||||
.fill_strings = mlx5e_grp_vport_fill_strings,
|
||||
.fill_stats = mlx5e_grp_vport_fill_stats,
|
||||
.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
|
||||
.update_stats = mlx5e_grp_vport_update_stats,
|
||||
},
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_802_3_get_num_stats,
|
||||
.fill_strings = mlx5e_grp_802_3_fill_strings,
|
||||
.fill_stats = mlx5e_grp_802_3_fill_stats,
|
||||
.update_stats_mask = MLX5E_NDO_UPDATE_STATS,
|
||||
.update_stats = mlx5e_grp_802_3_update_stats,
|
||||
},
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_2863_get_num_stats,
|
||||
.fill_strings = mlx5e_grp_2863_fill_strings,
|
||||
.fill_stats = mlx5e_grp_2863_fill_stats,
|
||||
.update_stats = mlx5e_grp_2863_update_stats,
|
||||
},
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_2819_get_num_stats,
|
||||
.fill_strings = mlx5e_grp_2819_fill_strings,
|
||||
.fill_stats = mlx5e_grp_2819_fill_stats,
|
||||
.update_stats = mlx5e_grp_2819_update_stats,
|
||||
},
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_phy_get_num_stats,
|
||||
.fill_strings = mlx5e_grp_phy_fill_strings,
|
||||
.fill_stats = mlx5e_grp_phy_fill_stats,
|
||||
.update_stats = mlx5e_grp_phy_update_stats,
|
||||
},
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
|
||||
.fill_strings = mlx5e_grp_eth_ext_fill_strings,
|
||||
.fill_stats = mlx5e_grp_eth_ext_fill_stats,
|
||||
.update_stats = mlx5e_grp_eth_ext_update_stats,
|
||||
},
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_pcie_get_num_stats,
|
||||
.fill_strings = mlx5e_grp_pcie_fill_strings,
|
||||
.fill_stats = mlx5e_grp_pcie_fill_stats,
|
||||
.update_stats = mlx5e_grp_pcie_update_stats,
|
||||
},
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_per_prio_get_num_stats,
|
||||
.fill_strings = mlx5e_grp_per_prio_fill_strings,
|
||||
.fill_stats = mlx5e_grp_per_prio_fill_stats,
|
||||
.update_stats = mlx5e_grp_per_prio_update_stats,
|
||||
},
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_pme_get_num_stats,
|
||||
|
@@ -923,6 +1153,7 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
|
|||
.get_num_stats = mlx5e_grp_ipsec_get_num_stats,
|
||||
.fill_strings = mlx5e_grp_ipsec_fill_strings,
|
||||
.fill_stats = mlx5e_grp_ipsec_fill_stats,
|
||||
.update_stats = mlx5e_grp_ipsec_update_stats,
|
||||
},
|
||||
{
|
||||
.get_num_stats = mlx5e_grp_channels_get_num_stats,
|
||||
|
|
|
@@ -207,11 +207,17 @@ struct mlx5e_stats {
|
|||
struct mlx5e_pcie_stats pcie;
|
||||
};
|
||||
|
||||
enum {
|
||||
MLX5E_NDO_UPDATE_STATS = BIT(0x1),
|
||||
};
|
||||
|
||||
struct mlx5e_priv;
|
||||
struct mlx5e_stats_grp {
|
||||
u16 update_stats_mask;
|
||||
int (*get_num_stats)(struct mlx5e_priv *priv);
|
||||
int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
|
||||
int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
|
||||
void (*update_stats)(struct mlx5e_priv *priv);
|
||||
};
|
||||
|
||||
extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
|
||||
|
|
Loading…
Reference in New Issue