Revert "net/tls: Add force_resync for driver resync"

This reverts commit b3ae2459f8.
Revert the force resync API.
Not in use. To be replaced by a better async resync API downstream.

Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
This commit is contained in:
Boris Pismenny 2020-06-08 12:42:52 +03:00 committed by Saeed Mahameed
parent 1182f36593
commit acb5a07aaf
2 changed files with 4 additions and 17 deletions

View file

@@ -607,22 +607,12 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
 #endif

 /* The TLS context is valid until sk_destruct is called */
-#define RESYNC_REQ (1 << 0)
-#define RESYNC_REQ_FORCE (1 << 1)
 static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);

-	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
-}
-
-static inline void tls_offload_rx_force_resync_request(struct sock *sk)
-{
-	struct tls_context *tls_ctx = tls_get_ctx(sk);
-	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
-
-	atomic64_set(&rx_ctx->resync_req, RESYNC_REQ | RESYNC_REQ_FORCE);
+	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
 }

 static inline void

View file

@@ -694,11 +694,10 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx;
-	bool is_req_pending, is_force_resync;
 	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+	u32 sock_data, is_req_pending;
 	struct tls_prot_info *prot;
 	s64 resync_req;
-	u32 sock_data;
 	u32 req_seq;

 	if (tls_ctx->rx_conf != TLS_HW)
@@ -713,11 +712,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 		resync_req = atomic64_read(&rx_ctx->resync_req);
 		req_seq = resync_req >> 32;
 		seq += TLS_HEADER_SIZE - 1;
-		is_req_pending = resync_req & RESYNC_REQ;
-		is_force_resync = resync_req & RESYNC_REQ_FORCE;
+		is_req_pending = resync_req;

-		if (likely(!is_req_pending) ||
-		    (!is_force_resync && req_seq != seq) ||
+		if (likely(!is_req_pending) || req_seq != seq ||
 		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
 			return;
 		break;