net/mlx5e: kTLS, Fix TX counters atomicity

Some TLS TX counters increment per socket/connection, and are not
protected against parallel modifications from several cores.
Switch them to atomic counters by taking them out of the SQ stats into
the global atomic TLS stats.
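
As a rough illustration of the distinction (a minimal sketch, not code from this patch; the my_sq_stats/my_sw_stats names are made up): a plain u64 in per-SQ stats is only safe because every increment runs in that SQ's serialized context, while a counter bumped from per-connection paths on arbitrary cores needs atomic64_t:

  /* Illustrative sketch only -- these structs/names are not from the driver. */
  struct my_sq_stats {
  	u64 pkts;		/* incremented only in the SQ's NAPI / under its TX lock */
  };

  struct my_sw_stats {
  	atomic64_t conns_added;	/* may be incremented from any core concurrently */
  };

  static void on_tx_completion(struct my_sq_stats *s)
  {
  	s->pkts++;			/* serialized by the SQ, no atomics needed */
  }

  static void on_tls_conn_added(struct my_sw_stats *s)
  {
  	atomic64_inc(&s->conns_added);	/* parallel-safe increment */
  }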

In this patch, we touch a single counter 'tx_tls_ctx' that counts the
number of device-offloaded TX TLS connections added.
Now that this counter can be increased without the need to have the SQ
context in hand, move it to the mlx5e_ktls_add_tx() callback where it
really belongs, out of the fast data-path.

This change is not needed for counters that increment only in NAPI
context or under the TX lock, as they are already protected.
Keep them as tls_* counters under 'struct mlx5e_sq_stats'.
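
For completeness, a minimal sketch of why the per-SQ counters can stay plain u64 at readout time (again with illustrative names, not the driver's actual stats-group code): each ring's counter has a single serialized writer and a stats dump simply sums the per-ring values, while the global connection counters are read with atomic64_read(), as the MLX5E_READ_CTR_ATOMIC64 macro in the diff below does:

  /* Illustrative readout sketch -- not the actual mlx5e stats code. */
  static u64 total_tx_pkts(const struct my_sq_stats *per_ring, int n_rings)
  {
  	u64 sum = 0;
  	int i;

  	for (i = 0; i < n_rings; i++)
  		sum += per_ring[i].pkts;	/* each entry has one serialized writer */
  	return sum;
  }

  static u64 total_conns_added(const struct my_sw_stats *s)
  {
  	return atomic64_read(&s->conns_added);	/* atomic read of the shared counter */
  }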

Fixes: d2ead1f360 ("net/mlx5e: Add kTLS TX HW offload support")
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
commit a51bce9698 (parent e929e3da53)
Author: Tariq Toukan, 2021-02-28 13:50:17 +02:00; committed by Saeed Mahameed
5 changed files with 33 additions and 26 deletions

drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c

@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 // Copyright (c) 2019 Mellanox Technologies.
 
+#include "en_accel/tls.h"
 #include "en_accel/ktls_txrx.h"
 #include "en_accel/ktls_utils.h"
@@ -50,6 +51,7 @@ static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
 struct mlx5e_ktls_offload_context_tx {
 	struct tls_offload_context_tx *tx_ctx;
 	struct tls12_crypto_info_aes_gcm_128 crypto_info;
+	struct mlx5e_tls_sw_stats *sw_stats;
 	u32 expected_seq;
 	u32 tisn;
 	u32 key_id;
@@ -99,6 +101,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
 	if (err)
 		goto err_create_key;
 
+	priv_tx->sw_stats = &priv->tls->sw_stats;
 	priv_tx->expected_seq = start_offload_tcp_sn;
 	priv_tx->crypto_info =
 		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
@@ -111,6 +114,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
 		goto err_create_tis;
 
 	priv_tx->ctx_post_pending = true;
+	atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);
 
 	return 0;
@@ -452,7 +456,6 @@ bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *s
 	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
 		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
-		stats->tls_ctx++;
 	}
 
 	seq = ntohl(tcp_hdr(skb)->seq);

drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h

@@ -41,6 +41,7 @@
 #include "en.h"
 
 struct mlx5e_tls_sw_stats {
+	atomic64_t tx_tls_ctx;
 	atomic64_t tx_tls_drop_metadata;
 	atomic64_t tx_tls_drop_resync_alloc;
 	atomic64_t tx_tls_drop_no_sync_data;

drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c

@@ -45,49 +45,58 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_drop_bypass_required) },
 };
 
+static const struct counter_desc mlx5e_ktls_sw_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_tls_sw_stats, tx_tls_ctx) },
+};
+
 #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
 	atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
 
-#define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc)
-
-static bool is_tls_atomic_stats(struct mlx5e_priv *priv)
+static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
 {
-	return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev);
+	if (!priv->tls)
+		return NULL;
+	if (mlx5_accel_is_ktls_device(priv->mdev))
+		return mlx5e_ktls_sw_stats_desc;
+	return mlx5e_tls_sw_stats_desc;
 }
 
 int mlx5e_tls_get_count(struct mlx5e_priv *priv)
 {
-	if (!is_tls_atomic_stats(priv))
+	if (!priv->tls)
 		return 0;
-
-	return NUM_TLS_SW_COUNTERS;
+	if (mlx5_accel_is_ktls_device(priv->mdev))
+		return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
+	return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
 }
 
 int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
 {
-	unsigned int i, idx = 0;
+	const struct counter_desc *stats_desc;
+	unsigned int i, n, idx = 0;
 
-	if (!is_tls_atomic_stats(priv))
-		return 0;
+	stats_desc = get_tls_atomic_stats(priv);
+	n = mlx5e_tls_get_count(priv);
 
-	for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+	for (i = 0; i < n; i++)
 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       mlx5e_tls_sw_stats_desc[i].format);
+		       stats_desc[i].format);
 
-	return NUM_TLS_SW_COUNTERS;
+	return n;
 }
 
 int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data)
 {
-	int i, idx = 0;
+	const struct counter_desc *stats_desc;
+	unsigned int i, n, idx = 0;
 
-	if (!is_tls_atomic_stats(priv))
-		return 0;
+	stats_desc = get_tls_atomic_stats(priv);
+	n = mlx5e_tls_get_count(priv);
 
-	for (i = 0; i < NUM_TLS_SW_COUNTERS; i++)
+	for (i = 0; i < n; i++)
 		data[idx++] =
 		    MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
-					    mlx5e_tls_sw_stats_desc, i);
+					    stats_desc, i);
 
-	return NUM_TLS_SW_COUNTERS;
+	return n;
 }

drivers/net/ethernet/mellanox/mlx5/core/en_stats.c

@@ -116,7 +116,6 @@ static const struct counter_desc sw_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
@@ -390,7 +389,6 @@ static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
 #ifdef CONFIG_MLX5_EN_TLS
 	s->tx_tls_encrypted_packets += sq_stats->tls_encrypted_packets;
 	s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
-	s->tx_tls_ctx               += sq_stats->tls_ctx;
 	s->tx_tls_ooo               += sq_stats->tls_ooo;
 	s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
 	s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
@@ -1650,7 +1648,6 @@ static const struct counter_desc sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
@@ -1776,7 +1773,6 @@ static const struct counter_desc qos_sq_stats_desc[] = {
 #ifdef CONFIG_MLX5_EN_TLS
 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
-	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
 	{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },

drivers/net/ethernet/mellanox/mlx5/core/en_stats.h

@@ -191,7 +191,6 @@ struct mlx5e_sw_stats {
 #ifdef CONFIG_MLX5_EN_TLS
 	u64 tx_tls_encrypted_packets;
 	u64 tx_tls_encrypted_bytes;
-	u64 tx_tls_ctx;
 	u64 tx_tls_ooo;
 	u64 tx_tls_dump_packets;
 	u64 tx_tls_dump_bytes;
@@ -364,7 +363,6 @@ struct mlx5e_sq_stats {
 #ifdef CONFIG_MLX5_EN_TLS
 	u64 tls_encrypted_packets;
 	u64 tls_encrypted_bytes;
-	u64 tls_ctx;
 	u64 tls_ooo;
 	u64 tls_dump_packets;
 	u64 tls_dump_bytes;