ch_ktls: Correction in finding correct length

Linear skbs may also come in on this path. Correct the length
extraction logic to account for them.

v2->v3:
- Separated unrelated changes from this patch.

Fixes: 5a4b9fe7fe ("cxgb4/chcr: complete record tx handling")
Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit b1b5cb1803
parent 9d2e5e9eeb
1 changed file with 8 additions and 7 deletions
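For context: in an skb, skb->len covers the headers plus all payload, while skb->data_len counts only the paged (fragment) portion and is therefore zero for a fully linear skb. The sketch below is illustrative only and not part of the patch; ktls_payload_len() and ktls_payload_len_old() are made-up names, not driver functions. It contrasts the old and corrected length extraction.

/*
 * Illustrative sketch only -- ktls_payload_len() and
 * ktls_payload_len_old() are hypothetical helpers, not driver code.
 */
#include <linux/skbuff.h>
#include <linux/tcp.h>

/* payload bytes after the TCP header; valid for linear and paged skbs */
static unsigned int ktls_payload_len(const struct sk_buff *skb)
{
        return skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
}

/* old logic: skb->data_len is the paged portion only, so a fully linear
 * skb reports 0 and the amount of record data to send is computed wrong
 */
static unsigned int ktls_payload_len_old(const struct sk_buff *skb)
{
        return skb->data_len;
}

The corrected expression is the same one the patch uses for skb_offset in chcr_ktls_xmit(), as the diff below shows.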
@@ -967,7 +967,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
         /* packet length = eth hdr len + ip hdr len + tcp hdr len
          * (including options).
          */
-        pktlen = skb->len - skb->data_len;
+        pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

         ctrl = sizeof(*cpl) + pktlen;
         len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -1860,6 +1860,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
 /* nic tls TX handler */
 static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+        u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
         struct ch_ktls_port_stats_debug *port_stats;
         struct chcr_ktls_ofld_ctx_tx *tx_ctx;
         struct ch_ktls_stats_debug *stats;
@@ -1867,7 +1868,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
         int data_len, qidx, ret = 0, mss;
         struct tls_record_info *record;
         struct chcr_ktls_info *tx_info;
-        u32 tls_end_offset, tcp_seq;
         struct tls_context *tls_ctx;
         struct sk_buff *local_skb;
         struct sge_eth_txq *q;
@@ -1875,8 +1875,11 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
         unsigned long flags;

         tcp_seq = ntohl(th->seq);
+        skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+        skb_data_len = skb->len - skb_offset;
+        data_len = skb_data_len;

-        mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : skb->data_len;
+        mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;

         tls_ctx = tls_get_ctx(skb->sk);
         if (unlikely(tls_ctx->netdev != dev))
@@ -1922,8 +1925,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
         /* copy skb contents into local skb */
         chcr_ktls_skb_copy(skb, local_skb);

-        /* go through the skb and send only one record at a time. */
-        data_len = skb->data_len;
         /* TCP segments can be in received either complete or partial.
          * chcr_end_part_handler will handle cases if complete record or end
          * part of the record is received. Incase of partial end part of record,
@@ -2020,9 +2021,9 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)

         } while (data_len > 0);

-        tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
+        tx_info->prev_seq = ntohl(th->seq) + skb_data_len;
         atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
-        atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes);
+        atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);

         /* tcp finish is set, send a separate tcp msg including all the options
          * as well.