tls: rx: support optimistic decrypt to user buffer with TLS 1.3

We currently don't support decrypt to user buffer with TLS 1.3
because we don't know the record type and how much padding the
record contains before decryption. In practice data records
are by far the most common and padding is used rarely, so
we can assume a data record with no padding, and if we find out
that wasn't the case - retry the crypto in place (decrypt
to skb).

To safeguard against the user overwriting the content type and
padding before we can check them, attach a 1B sg entry where the
last byte of the record will land.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Jakub Kicinski 2022-07-05 16:59:23 -07:00 committed by David S. Miller
parent 603380f54f
commit ce61327ce9

View file

@ -47,6 +47,7 @@
struct tls_decrypt_arg { struct tls_decrypt_arg {
bool zc; bool zc;
bool async; bool async;
u8 tail;
}; };
noinline void tls_err_abort(struct sock *sk, int err) noinline void tls_err_abort(struct sock *sk, int err)
@ -133,7 +134,8 @@ static int skb_nsg(struct sk_buff *skb, int offset, int len)
return __skb_nsg(skb, offset, len, 0); return __skb_nsg(skb, offset, len, 0);
} }
static int padding_length(struct tls_prot_info *prot, struct sk_buff *skb) static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
struct tls_decrypt_arg *darg)
{ {
struct strp_msg *rxm = strp_msg(skb); struct strp_msg *rxm = strp_msg(skb);
struct tls_msg *tlm = tls_msg(skb); struct tls_msg *tlm = tls_msg(skb);
@ -142,7 +144,7 @@ static int padding_length(struct tls_prot_info *prot, struct sk_buff *skb)
/* Determine zero-padding length */ /* Determine zero-padding length */
if (prot->version == TLS_1_3_VERSION) { if (prot->version == TLS_1_3_VERSION) {
int offset = rxm->full_len - TLS_TAG_SIZE - 1; int offset = rxm->full_len - TLS_TAG_SIZE - 1;
char content_type = 0; char content_type = darg->zc ? darg->tail : 0;
int err; int err;
while (content_type == 0) { while (content_type == 0) {
@ -1418,17 +1420,18 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
struct strp_msg *rxm = strp_msg(skb); struct strp_msg *rxm = strp_msg(skb);
struct tls_msg *tlm = tls_msg(skb); struct tls_msg *tlm = tls_msg(skb);
int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0; int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
u8 *aad, *iv, *tail, *mem = NULL;
struct aead_request *aead_req; struct aead_request *aead_req;
struct sk_buff *unused; struct sk_buff *unused;
u8 *aad, *iv, *mem = NULL;
struct scatterlist *sgin = NULL; struct scatterlist *sgin = NULL;
struct scatterlist *sgout = NULL; struct scatterlist *sgout = NULL;
const int data_len = rxm->full_len - prot->overhead_size; const int data_len = rxm->full_len - prot->overhead_size;
int tail_pages = !!prot->tail_size;
int iv_offset = 0; int iv_offset = 0;
if (darg->zc && (out_iov || out_sg)) { if (darg->zc && (out_iov || out_sg)) {
if (out_iov) if (out_iov)
n_sgout = 1 + n_sgout = 1 + tail_pages +
iov_iter_npages_cap(out_iov, INT_MAX, data_len); iov_iter_npages_cap(out_iov, INT_MAX, data_len);
else else
n_sgout = sg_nents(out_sg); n_sgout = sg_nents(out_sg);
@ -1452,9 +1455,10 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
mem_size = aead_size + (nsg * sizeof(struct scatterlist)); mem_size = aead_size + (nsg * sizeof(struct scatterlist));
mem_size = mem_size + prot->aad_size; mem_size = mem_size + prot->aad_size;
mem_size = mem_size + MAX_IV_SIZE; mem_size = mem_size + MAX_IV_SIZE;
mem_size = mem_size + prot->tail_size;
/* Allocate a single block of memory which contains /* Allocate a single block of memory which contains
* aead_req || sgin[] || sgout[] || aad || iv. * aead_req || sgin[] || sgout[] || aad || iv || tail.
* This order achieves correct alignment for aead_req, sgin, sgout. * This order achieves correct alignment for aead_req, sgin, sgout.
*/ */
mem = kmalloc(mem_size, sk->sk_allocation); mem = kmalloc(mem_size, sk->sk_allocation);
@ -1467,6 +1471,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
sgout = sgin + n_sgin; sgout = sgin + n_sgin;
aad = (u8 *)(sgout + n_sgout); aad = (u8 *)(sgout + n_sgout);
iv = aad + prot->aad_size; iv = aad + prot->aad_size;
tail = iv + MAX_IV_SIZE;
/* For CCM based ciphers, first byte of nonce+iv is a constant */ /* For CCM based ciphers, first byte of nonce+iv is a constant */
switch (prot->cipher_type) { switch (prot->cipher_type) {
@ -1518,12 +1523,18 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
sg_init_table(sgout, n_sgout); sg_init_table(sgout, n_sgout);
sg_set_buf(&sgout[0], aad, prot->aad_size); sg_set_buf(&sgout[0], aad, prot->aad_size);
err = tls_setup_from_iter(out_iov, err = tls_setup_from_iter(out_iov, data_len,
data_len + prot->tail_size,
&pages, &sgout[1], &pages, &sgout[1],
(n_sgout - 1)); (n_sgout - 1 - tail_pages));
if (err < 0) if (err < 0)
goto fallback_to_reg_recv; goto fallback_to_reg_recv;
if (prot->tail_size) {
sg_unmark_end(&sgout[pages]);
sg_set_buf(&sgout[pages + 1], tail,
prot->tail_size);
sg_mark_end(&sgout[pages + 1]);
}
} else if (out_sg) { } else if (out_sg) {
memcpy(sgout, out_sg, n_sgout * sizeof(*sgout)); memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
} else { } else {
@ -1542,6 +1553,9 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
if (darg->async) if (darg->async)
return 0; return 0;
if (prot->tail_size)
darg->tail = *tail;
/* Release the pages in case iov was mapped to pages */ /* Release the pages in case iov was mapped to pages */
for (; pages > 0; pages--) for (; pages > 0; pages--)
put_page(sg_page(&sgout[pages])); put_page(sg_page(&sgout[pages]));
@ -1583,9 +1597,15 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
return err; return err;
if (darg->async) if (darg->async)
goto decrypt_next; goto decrypt_next;
/* If opportunistic TLS 1.3 ZC failed retry without ZC */
if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
darg->tail != TLS_RECORD_TYPE_DATA)) {
darg->zc = false;
return decrypt_skb_update(sk, skb, dest, darg);
}
decrypt_done: decrypt_done:
pad = padding_length(prot, skb); pad = tls_padding_length(prot, skb, darg);
if (pad < 0) if (pad < 0)
return pad; return pad;