net/tls: Add support for async decryption of tls records

When tls records are decrypted using asynchronous accelerators such as
the NXP CAAM engine, the crypto APIs return -EINPROGRESS. Presently, on
getting -EINPROGRESS, tls record processing stalls until the crypto
accelerator finishes and returns the result. This incurs a context
switch and is not an efficient way of using the crypto accelerators.
Crypto accelerators work most efficiently when they are queued with
multiple crypto jobs without having to wait for the previous ones to
complete.
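
For reference, the blocking pattern in question reduces to the generic
crypto wait helpers (a condensed sketch, assuming aead_req has already
been set up):

	DECLARE_CRYPTO_WAIT(wait);

	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	/* sleeps (context switch) whenever the engine returns -EINPROGRESS */
	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &wait);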

The patch submits multiple crypto requests without waiting for previous
ones to complete. This has been implemented for records
which are decrypted in zero-copy mode. At the end of recvmsg(), we wait
for all the asynchronous decryption requests to complete.
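
The submit-without-waiting idea reduces to the following (condensed
from tls_do_decryption() in the diff below; the pending counter is what
recvmsg() later waits on):

	atomic_inc(&ctx->decrypt_pending);
	ret = crypto_aead_decrypt(aead_req);
	if (ret == -EINPROGRESS)
		return ret;	/* queued; go submit the next record */
	atomic_dec(&ctx->decrypt_pending);	/* completed synchronously */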

The references to records which have been sent for async decryption are
dropped. For cases where record decryption is not possible in zero-copy
mode, asynchronous decryption is not used and we wait for the
decryption crypto API to complete.
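
Concretely, dropping the reference means clearing the local skb pointer
before advancing, making the completion handler the sole owner (see
tls_sw_recvmsg() in the diff below; tls_sw_advance_skb() now tolerates
a NULL skb):

	/* For async, drop current skb reference */
	if (async)
		skb = NULL;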

For crypto requests executing asynchronously, the memory for the
aead_request, scatterlists, skb etc. is freed from the decryption
completion handler. The decryption completion handler wakes up the
sleeping user context once recvmsg() flags that it has submitted all
the decryption requests and there are no more decryption requests
pending completion.
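
The handshake between the completion handler and recvmsg() boils down
to a pending counter plus a notify flag (condensed from the diff
below):

	/* in tls_decrypt_done() */
	int pending = atomic_dec_return(&ctx->decrypt_pending);

	if (!pending && READ_ONCE(ctx->async_notify))
		complete(&ctx->async_wait.completion);

	/* in recvmsg(), after the last record has been submitted */
	smp_store_mb(ctx->async_notify, true);
	if (atomic_read(&ctx->decrypt_pending))
		err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);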

Signed-off-by: Vakul Garg <vakul.garg@nxp.com>
Reviewed-by: Dave Watson <davejwatson@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2 files changed, 127 insertions(+), 13 deletions(-)

diff --git a/include/net/tls.h b/include/net/tls.h
--- a/include/net/tls.h
+++ b/include/net/tls.h

@@ -124,6 +124,12 @@ struct tls_sw_context_rx {
 	struct sk_buff *recv_pkt;
 	u8 control;
 	bool decrypted;
+	atomic_t decrypt_pending;
+	bool async_notify;
+};
+
+struct decrypt_req_ctx {
+	struct sock *sk;
 };
 
 struct tls_record_info {

diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c

@@ -119,12 +119,50 @@ static int skb_nsg(struct sk_buff *skb, int offset, int len)
 	return __skb_nsg(skb, offset, len, 0);
 }
 
+static void tls_decrypt_done(struct crypto_async_request *req, int err)
+{
+	struct aead_request *aead_req = (struct aead_request *)req;
+	struct decrypt_req_ctx *req_ctx =
+			(struct decrypt_req_ctx *)(aead_req + 1);
+
+	struct scatterlist *sgout = aead_req->dst;
+
+	struct tls_context *tls_ctx = tls_get_ctx(req_ctx->sk);
+	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+	int pending = atomic_dec_return(&ctx->decrypt_pending);
+	struct scatterlist *sg;
+	unsigned int pages;
+
+	/* Propagate if there was an err */
+	if (err) {
+		ctx->async_wait.err = err;
+		tls_err_abort(req_ctx->sk, err);
+	}
+
+	/* Release the skb, pages and memory allocated for crypto req */
+	kfree_skb(req->data);
+
+	/* Skip the first S/G entry as it points to AAD */
+	for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
+		if (!sg)
+			break;
+		put_page(sg_page(sg));
+	}
+
+	kfree(aead_req);
+
+	if (!pending && READ_ONCE(ctx->async_notify))
+		complete(&ctx->async_wait.completion);
+}
+
 static int tls_do_decryption(struct sock *sk,
+			     struct sk_buff *skb,
 			     struct scatterlist *sgin,
 			     struct scatterlist *sgout,
 			     char *iv_recv,
 			     size_t data_len,
-			     struct aead_request *aead_req)
+			     struct aead_request *aead_req,
+			     bool async)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@ -135,10 +173,34 @@ static int tls_do_decryption(struct sock *sk,
 	aead_request_set_crypt(aead_req, sgin, sgout,
 			       data_len + tls_ctx->rx.tag_size,
 			       (u8 *)iv_recv);
-	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  crypto_req_done, &ctx->async_wait);
 
-	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
+	if (async) {
+		struct decrypt_req_ctx *req_ctx;
+
+		req_ctx = (struct decrypt_req_ctx *)(aead_req + 1);
+		req_ctx->sk = sk;
+
+		aead_request_set_callback(aead_req,
+					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+					  tls_decrypt_done, skb);
+		atomic_inc(&ctx->decrypt_pending);
+	} else {
+		aead_request_set_callback(aead_req,
+					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+					  crypto_req_done, &ctx->async_wait);
+	}
+
+	ret = crypto_aead_decrypt(aead_req);
+	if (ret == -EINPROGRESS) {
+		if (async)
+			return ret;
+
+		ret = crypto_wait_req(ret, &ctx->async_wait);
+	}
+
+	if (async)
+		atomic_dec(&ctx->decrypt_pending);
 
 	return ret;
 }
@@ -841,7 +903,10 @@ fallback_to_reg_recv:
 	}
 
 	/* Prepare and submit AEAD request */
-	err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req);
+	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
+				data_len, aead_req, *zc);
+	if (err == -EINPROGRESS)
+		return err;
 
 	/* Release the pages in case iov was mapped to pages */
 	for (; pages > 0; pages--)
@@ -866,8 +931,12 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
 #endif
 	if (!ctx->decrypted) {
 		err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
-		if (err < 0)
+		if (err < 0) {
+			if (err == -EINPROGRESS)
+				tls_advance_record_sn(sk, &tls_ctx->rx);
+
 			return err;
+		}
 	} else {
 		*zc = false;
 	}
@@ -895,18 +964,20 @@ static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-	struct strp_msg *rxm = strp_msg(skb);
 
-	if (len < rxm->full_len) {
-		rxm->offset += len;
-		rxm->full_len -= len;
+	if (skb) {
+		struct strp_msg *rxm = strp_msg(skb);
 
-		return false;
+		if (len < rxm->full_len) {
+			rxm->offset += len;
+			rxm->full_len -= len;
+			return false;
+		}
+		kfree_skb(skb);
 	}
 
 	/* Finished with message */
 	ctx->recv_pkt = NULL;
-	kfree_skb(skb);
 	__strp_unpause(&ctx->strp);
 
 	return true;
@@ -929,6 +1000,7 @@ int tls_sw_recvmsg(struct sock *sk,
 	int target, err = 0;
 	long timeo;
 	bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+	int num_async = 0;
 
 	flags |= nonblock;
@@ -941,6 +1013,7 @@ int tls_sw_recvmsg(struct sock *sk,
 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 	do {
 		bool zc = false;
+		bool async = false;
 		int chunk = 0;
 
 		skb = tls_wait_data(sk, flags, timeo, &err);
@@ -948,6 +1021,7 @@ int tls_sw_recvmsg(struct sock *sk,
 			goto recv_end;
 
 		rxm = strp_msg(skb);
+
 		if (!cmsg) {
 			int cerr;
@@ -974,26 +1048,39 @@ int tls_sw_recvmsg(struct sock *sk,
 			err = decrypt_skb_update(sk, skb, &msg->msg_iter,
 						 &chunk, &zc);
-			if (err < 0) {
+			if (err < 0 && err != -EINPROGRESS) {
 				tls_err_abort(sk, EBADMSG);
 				goto recv_end;
 			}
+
+			if (err == -EINPROGRESS) {
+				async = true;
+				num_async++;
+				goto pick_next_record;
+			}
+
 			ctx->decrypted = true;
 		}
 
 		if (!zc) {
 			chunk = min_t(unsigned int, rxm->full_len, len);
+
 			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
 						    chunk);
 			if (err < 0)
 				goto recv_end;
 		}
 
+pick_next_record:
 		copied += chunk;
 		len -= chunk;
 		if (likely(!(flags & MSG_PEEK))) {
 			u8 control = ctx->control;
 
+			/* For async, drop current skb reference */
+			if (async)
+				skb = NULL;
+
 			if (tls_sw_advance_skb(sk, skb, chunk)) {
 				/* Return full control message to
 				 * userspace before trying to parse
@@ -1002,14 +1089,33 @@ int tls_sw_recvmsg(struct sock *sk,
 					msg->msg_flags |= MSG_EOR;
 				if (control != TLS_RECORD_TYPE_DATA)
 					goto recv_end;
+			} else {
+				break;
 			}
 		}
+
 		/* If we have a new message from strparser, continue now. */
 		if (copied >= target && !ctx->recv_pkt)
 			break;
 	} while (len);
 
 recv_end:
+	if (num_async) {
+		/* Wait for all previously submitted records to be decrypted */
+		smp_store_mb(ctx->async_notify, true);
+		if (atomic_read(&ctx->decrypt_pending)) {
+			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+			if (err) {
+				/* one of async decrypt failed */
+				tls_err_abort(sk, err);
+				copied = 0;
+			}
+		} else {
+			reinit_completion(&ctx->async_wait.completion);
+		}
+		WRITE_ONCE(ctx->async_notify, false);
+	}
+
 	release_sock(sk);
 	return copied ? : err;
 }
@@ -1349,6 +1455,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 		goto free_aead;
 
 	if (sw_ctx_rx) {
+		(*aead)->reqsize = sizeof(struct decrypt_req_ctx);
+
 		/* Set up strparser */
 		memset(&cb, 0, sizeof(cb));
 		cb.rcv_msg = tls_queue;