mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-27 12:57:53 +00:00
tls: fix race between tx work scheduling and socket close
commit e01e3934a1
upstream. Similarly to previous commit, the submitting thread (recvmsg/sendmsg) may exit as soon as the async crypto handler calls complete(). Reorder scheduling the work before calling complete(). This seems more logical in the first place, as it's the inverse order of what the submitting thread will do. Reported-by: valis <sec@valis.email> Fixes: a42055e8d2
("net/tls: Add support for async encryption of records for performance") Signed-off-by: Jakub Kicinski <kuba@kernel.org> Reviewed-by: Simon Horman <horms@kernel.org> Reviewed-by: Sabrina Dubroca <sd@queasysnail.net> Signed-off-by: David S. Miller <davem@davemloft.net> [Lee: Fixed merge-conflict in Stable branches linux-6.1.y and older] Signed-off-by: Lee Jones <lee@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
36c676e2ed
commit
196f198ca6
1 changed file with 6 additions and 10 deletions
|
@ -449,7 +449,6 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
|
|||
struct scatterlist *sge;
|
||||
struct sk_msg *msg_en;
|
||||
struct tls_rec *rec;
|
||||
bool ready = false;
|
||||
struct sock *sk;
|
||||
|
||||
rec = container_of(aead_req, struct tls_rec, aead_req);
|
||||
|
@ -486,19 +485,16 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err)
|
|||
/* If received record is at head of tx_list, schedule tx */
|
||||
first_rec = list_first_entry(&ctx->tx_list,
|
||||
struct tls_rec, list);
|
||||
if (rec == first_rec)
|
||||
ready = true;
|
||||
if (rec == first_rec) {
|
||||
/* Schedule the transmission */
|
||||
if (!test_and_set_bit(BIT_TX_SCHEDULED,
|
||||
&ctx->tx_bitmask))
|
||||
schedule_delayed_work(&ctx->tx_work.work, 1);
|
||||
}
|
||||
}
|
||||
|
||||
if (atomic_dec_and_test(&ctx->encrypt_pending))
|
||||
complete(&ctx->async_wait.completion);
|
||||
|
||||
if (!ready)
|
||||
return;
|
||||
|
||||
/* Schedule the transmission */
|
||||
if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
|
||||
schedule_delayed_work(&ctx->tx_work.work, 1);
|
||||
}
|
||||
|
||||
static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx)
|
||||
|
|
Loading…
Reference in a new issue