net/tls: Fix wrong record sn in async mode of device resync

In async_resync mode, we log the TCP seq of records until the async request
is completed.  Later, if one of the logged seqs matches the resync
request, we return it together with its record serial number.  Before this
fix, we mistakenly returned the serial number of the current record
instead.

Fixes: ed9b7646b0 ("net/tls: Add asynchronous resync")
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Boris Pismenny <borisp@nvidia.com>
Link: https://lore.kernel.org/r/20201115131448.2702-1-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Tariq Toukan 2020-11-15 15:14:48 +02:00 committed by Jakub Kicinski
parent a5bbcbf290
commit 138559b9f9
2 changed files with 42 additions and 11 deletions

View File

@ -300,7 +300,8 @@ enum tls_offload_sync_type {
#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX 13
struct tls_offload_resync_async {
atomic64_t req;
u32 loglen;
u16 loglen;
u16 rcd_delta;
u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
};
@ -471,6 +472,18 @@ static inline bool tls_bigint_increment(unsigned char *seq, int len)
return (i == -1);
}
/* Subtract n from the 8-byte big-endian record sequence number at seq.
 * Used on resync to roll the record sn back by the number of records
 * that passed while the async resync request was in flight.
 */
static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	__be64 *sn_be = (__be64 *)seq;

	/* Only an 8-byte record sequence number is supported here. */
	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	*sn_be = cpu_to_be64(be64_to_cpu(*sn_be) - n);
}
static inline struct tls_context *tls_get_ctx(const struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@ -639,6 +652,7 @@ tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
rx_ctx->resync_async->loglen = 0;
rx_ctx->resync_async->rcd_delta = 0;
}
static inline void

View File

@ -694,36 +694,51 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
static bool
tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
s64 resync_req, u32 *seq)
s64 resync_req, u32 *seq, u16 *rcd_delta)
{
u32 is_async = resync_req & RESYNC_REQ_ASYNC;
u32 req_seq = resync_req >> 32;
u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
u16 i;
*rcd_delta = 0;
if (is_async) {
/* shouldn't get to wraparound:
* too long in async stage, something bad happened
*/
if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
return false;
/* asynchronous stage: log all headers seq such that
* req_seq <= seq <= end_seq, and wait for real resync request
*/
if (between(*seq, req_seq, req_end) &&
if (before(*seq, req_seq))
return false;
if (!after(*seq, req_end) &&
resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
resync_async->log[resync_async->loglen++] = *seq;
resync_async->rcd_delta++;
return false;
}
/* synchronous stage: check against the logged entries and
* proceed to check the next entries if no match was found
*/
while (resync_async->loglen) {
if (req_seq == resync_async->log[resync_async->loglen - 1] &&
atomic64_try_cmpxchg(&resync_async->req,
&resync_req, 0)) {
resync_async->loglen = 0;
for (i = 0; i < resync_async->loglen; i++)
if (req_seq == resync_async->log[i] &&
atomic64_try_cmpxchg(&resync_async->req, &resync_req, 0)) {
*rcd_delta = resync_async->rcd_delta - i;
*seq = req_seq;
resync_async->loglen = 0;
resync_async->rcd_delta = 0;
return true;
}
resync_async->loglen--;
}
resync_async->loglen = 0;
resync_async->rcd_delta = 0;
if (req_seq == *seq &&
atomic64_try_cmpxchg(&resync_async->req,
@ -741,6 +756,7 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
u32 sock_data, is_req_pending;
struct tls_prot_info *prot;
s64 resync_req;
u16 rcd_delta;
u32 req_seq;
if (tls_ctx->rx_conf != TLS_HW)
@ -786,8 +802,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
return;
if (!tls_device_rx_resync_async(rx_ctx->resync_async,
resync_req, &seq))
resync_req, &seq, &rcd_delta))
return;
tls_bigint_subtract(rcd_sn, rcd_delta);
break;
}