net/tls: Add force_resync for driver resync

This patch adds a field to the tls rx offload context which enables
drivers to force a send_resync call.

This field can be used by drivers to request a resync at the next
possible tls record. It is beneficial for hardware that provides the
resync sequence number asynchronously. In such cases, the packet that
triggered the resync does not contain the information required for a
resync. Instead, the driver requests a resync for all following
TLS records, until the asynchronous notification carrying the resync
request TCP sequence arrives.
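
As a sketch of that flow from the driver's side (the mydrv_* names
below are hypothetical; only the two tls_offload_rx_*_resync_request()
helpers come from this patch):

#include <net/tls.h>

/* The device reports it lost sync, but the triggering packet does not
 * carry the record's TCP sequence: arm a force resync so the stack
 * resyncs at the next TLS record regardless of sequence.
 */
static void mydrv_rx_lost_sync(struct sock *sk)
{
	tls_offload_rx_force_resync_request(sk);
}

/* The asynchronous notification finally delivered the resync TCP
 * sequence: issue the normal, sequence-matched resync request.
 */
static void mydrv_rx_async_resync_notify(struct sock *sk, __be32 seq)
{
	tls_offload_rx_resync_request(sk, seq);
}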

A follow-up series for mlx5e ConnectX-6DX TLS RX offload support will
use this mechanism.

Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Tariq Toukan <tariqt@mellanox.com>
Date:      2020-05-27 12:25:26 +03:00
Committer: David S. Miller <davem@davemloft.net>
Commit:    b3ae2459f8 (parent bdad7f9441)

 2 files changed, 17 insertions(+), 4 deletions(-)

--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -594,12 +594,22 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
 #endif
 
 /* The TLS context is valid until sk_destruct is called */
+#define RESYNC_REQ (1 << 0)
+#define RESYNC_REQ_FORCE (1 << 1)
 static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
 
-	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
+	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+}
+
+static inline void tls_offload_rx_force_resync_request(struct sock *sk)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+	atomic64_set(&rx_ctx->resync_req, RESYNC_REQ | RESYNC_REQ_FORCE);
 }
 
 static inline void
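
The resync_req field packs the whole request into a single 64-bit
word: the expected record's TCP sequence (host order) in the upper 32
bits and the request flags in the low bits, so one
atomic64_set()/atomic64_read() pair keeps sequence and flags consistent
without a lock. A minimal userspace sketch of this assumed layout:

#include <assert.h>
#include <stdint.h>

#define RESYNC_REQ       (1 << 0)
#define RESYNC_REQ_FORCE (1 << 1)

int main(void)
{
	uint32_t seq = 0x12345678;

	/* Normal request: sequence in the high half, pending flag set. */
	uint64_t resync_req = ((uint64_t)seq << 32) | RESYNC_REQ;
	assert((uint32_t)(resync_req >> 32) == seq);
	assert(resync_req & RESYNC_REQ);
	assert(!(resync_req & RESYNC_REQ_FORCE));

	/* Force request: no sequence, flags only. */
	resync_req = RESYNC_REQ | RESYNC_REQ_FORCE;
	assert((resync_req >> 32) == 0);
	return 0;
}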

--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -694,10 +694,11 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx;
+	bool is_req_pending, is_force_resync;
 	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
-	u32 sock_data, is_req_pending;
 	struct tls_prot_info *prot;
 	s64 resync_req;
+	u32 sock_data;
 	u32 req_seq;
 
 	if (tls_ctx->rx_conf != TLS_HW)
@@ -712,9 +713,11 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 		resync_req = atomic64_read(&rx_ctx->resync_req);
 		req_seq = resync_req >> 32;
 		seq += TLS_HEADER_SIZE - 1;
-		is_req_pending = resync_req;
+		is_req_pending = resync_req & RESYNC_REQ;
+		is_force_resync = resync_req & RESYNC_REQ_FORCE;
 
-		if (likely(!is_req_pending) || req_seq != seq ||
+		if (likely(!is_req_pending) ||
+		    (!is_force_resync && req_seq != seq) ||
 		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
 			return;
 		break;
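
The rewritten condition fires a resync only when a request is pending,
when the record's sequence matches unless the force flag is set, and
when the atomic64_try_cmpxchg() wins the race to clear the request, so
each request is consumed exactly once. The sequence/force predicate in
isolation (an illustrative standalone rewrite; the cmpxchg step is
omitted):

#include <stdbool.h>
#include <stdint.h>

#define RESYNC_REQ       (1 << 0)
#define RESYNC_REQ_FORCE (1 << 1)

/* Mirrors the check in tls_device_rx_resync_new_rec(): a pending
 * request matches either on TCP sequence or, when forced, on any
 * record.
 */
static bool resync_should_fire(uint64_t resync_req, uint32_t seq)
{
	uint32_t req_seq = resync_req >> 32;
	bool is_req_pending = resync_req & RESYNC_REQ;
	bool is_force_resync = resync_req & RESYNC_REQ_FORCE;

	return is_req_pending && (is_force_resync || req_seq == seq);
}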