cxgb4/chcr: Add ipv6 support and statistics

Add IPv6 support and kTLS-related statistics.

v1->v2:
- Added blank lines at two places.

v3->v4:
- Replaced atomic_t with atomic64_t.
- Added a few necessary stat counters (the counter pattern is sketched below).
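
All of the new counters follow one pattern: per-adapter atomic64_t fields in struct chcr_stats_debug that the Tx path bumps with atomic64_inc()/atomic64_add() and the debugfs show routine reads back with atomic64_read(). The sketch below condenses that pattern for reference; the wrapper struct and helper names (chcr_ktls_stats_sketch, ktls_stats_account_xmit, ktls_stats_dump) are illustrative stand-ins rather than the driver's real symbols, while the field names and the atomic64/seq_file calls match the hunks that follow.

#include <linux/atomic.h>
#include <linux/seq_file.h>

/* Condensed, illustrative view of the counters this patch adds to
 * struct chcr_stats_debug (field names match the patch; this wrapper
 * struct and the helpers below are hypothetical).
 */
struct chcr_ktls_stats_sketch {
        atomic64_t ktls_tx_connection_open;   /* chcr_ktls_dev_add() success      */
        atomic64_t ktls_tx_connection_fail;   /* any chcr_ktls_dev_add() failure  */
        atomic64_t ktls_tx_encrypted_packets; /* one per skb sent for encryption  */
        atomic64_t ktls_tx_encrypted_bytes;   /* payload bytes, via atomic64_add  */
};

/* Hot path: lockless 64-bit increments, callable from the xmit handler. */
static inline void ktls_stats_account_xmit(struct chcr_ktls_stats_sketch *s,
                                           unsigned int payload_len)
{
        atomic64_inc(&s->ktls_tx_encrypted_packets);
        atomic64_add(payload_len, &s->ktls_tx_encrypted_bytes);
}

/* Slow path: debugfs show routine prints a snapshot of each counter. */
static void ktls_stats_dump(struct seq_file *seq,
                            struct chcr_ktls_stats_sketch *s)
{
        seq_printf(seq, "Tx connection created: %20llu\n",
                   atomic64_read(&s->ktls_tx_connection_open));
        seq_printf(seq, "Packets passed for encryption : %20llu\n",
                   atomic64_read(&s->ktls_tx_encrypted_packets));
        seq_printf(seq, "Bytes passed for encryption : %20llu\n",
                   atomic64_read(&s->ktls_tx_encrypted_bytes));
}

Byte and packet counts at 40/100G line rates overflow 32 bits almost immediately, so the v4 switch from atomic_t to atomic64_t gives lockless 64-bit increments that remain safe to read concurrently from debugfs.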

Signed-off-by: Rohit Maheshwari <rohitm@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Rohit Maheshwari, 2020-03-07 20:06:08 +05:30, committed by David S. Miller
parent dc05f3df8f
commit 62370a4f34
4 changed files with 149 additions and 3 deletions

File 1 of 4:

@@ -3,6 +3,7 @@
 #ifdef CONFIG_CHELSIO_TLS_DEVICE
 #include "chcr_ktls.h"
+#include "clip_tbl.h"
 static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
 /*
@@ -153,8 +154,10 @@ static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
                 /* FALLTHRU */
         case KTLS_CONN_SET_TCB_RPL:
                 /* Check if l2t state is valid, then move to ready state. */
-                if (cxgb4_check_l2t_valid(tx_info->l2te))
+                if (cxgb4_check_l2t_valid(tx_info->l2te)) {
                         tx_info->connection_state = KTLS_CONN_TX_READY;
+                        atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ctx);
+                }
                 break;
         case KTLS_CONN_TX_READY:
@@ -219,6 +222,56 @@ static int chcr_ktls_act_open_req(struct sock *sk,
         return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
 }
+/*
+ * chcr_ktls_act_open_req6: creates TCB entry for ipv6 connection.
+ * @sk - tcp socket.
+ * @tx_info - driver specific tls info.
+ * @atid - connection active tid.
+ * return - send success/failure.
+ */
+static int chcr_ktls_act_open_req6(struct sock *sk,
+                                   struct chcr_ktls_info *tx_info,
+                                   int atid)
+{
+        struct inet_sock *inet = inet_sk(sk);
+        struct cpl_t6_act_open_req6 *cpl6;
+        struct cpl_act_open_req6 *cpl;
+        struct sk_buff *skb;
+        unsigned int len;
+        int qid_atid;
+        u64 options;
+        len = sizeof(*cpl6);
+        skb = alloc_skb(len, GFP_KERNEL);
+        if (unlikely(!skb))
+                return -ENOMEM;
+        /* mark it a control pkt */
+        set_wr_txq(skb, CPL_PRIORITY_CONTROL, tx_info->port_id);
+        cpl6 = __skb_put_zero(skb, len);
+        cpl = (struct cpl_act_open_req6 *)cpl6;
+        INIT_TP_WR(cpl6, 0);
+        qid_atid = TID_QID_V(tx_info->rx_qid) | TID_TID_V(atid);
+        OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_atid));
+        cpl->local_port = inet->inet_sport;
+        cpl->peer_port = inet->inet_dport;
+        cpl->local_ip_hi = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[0];
+        cpl->local_ip_lo = *(__be64 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8[8];
+        cpl->peer_ip_hi = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[0];
+        cpl->peer_ip_lo = *(__be64 *)&sk->sk_v6_daddr.in6_u.u6_addr8[8];
+        /* first 64 bit option field. */
+        options = TCAM_BYPASS_F | ULP_MODE_V(ULP_MODE_NONE) | NON_OFFLOAD_F |
+                  SMAC_SEL_V(tx_info->smt_idx) | TX_CHAN_V(tx_info->tx_chan);
+        cpl->opt0 = cpu_to_be64(options);
+        /* next 64 bit option field. */
+        options =
+                TX_QUEUE_V(tx_info->adap->params.tp.tx_modq[tx_info->tx_chan]);
+        cpl->opt2 = htonl(options);
+        return cxgb4_l2t_send(tx_info->netdev, skb, tx_info->l2te);
+}
 /*
  * chcr_setup_connection: create a TCB entry so that TP will form tcp packets.
  * @sk - tcp socket.
@@ -245,7 +298,13 @@ static int chcr_setup_connection(struct sock *sk,
                 ret = chcr_ktls_act_open_req(sk, tx_info, atid);
         } else {
                 tx_info->ip_family = AF_INET6;
-                ret = -EOPNOTSUPP;
+                ret =
+                cxgb4_clip_get(tx_info->netdev,
+                               (const u32 *)&sk->sk_v6_rcv_saddr.in6_u.u6_addr8,
+                               1);
+                if (ret)
+                        goto out;
+                ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
         }
         /* if return type is NET_XMIT_CN, msg will be sent but delayed, mark ret
@@ -322,23 +381,35 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
         struct chcr_ktls_ofld_ctx_tx *tx_ctx =
                                 chcr_get_ktls_tx_context(tls_ctx);
         struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+        struct sock *sk;
         if (!tx_info)
                 return;
+        sk = tx_info->sk;
         spin_lock(&tx_info->lock);
         tx_info->connection_state = KTLS_CONN_CLOSED;
         spin_unlock(&tx_info->lock);
+        /* clear l2t entry */
         if (tx_info->l2te)
                 cxgb4_l2t_release(tx_info->l2te);
+        /* clear clip entry */
+        if (tx_info->ip_family == AF_INET6)
+                cxgb4_clip_release(netdev,
+                                   (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
+                                   1);
+        /* clear tid */
         if (tx_info->tid != -1) {
                 /* clear tcb state and then release tid */
                 chcr_ktls_mark_tcb_close(tx_info);
                 cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
                                  tx_info->tid, tx_info->ip_family);
         }
+        atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
         kvfree(tx_info);
         tx_ctx->chcr_info = NULL;
 }
@@ -424,7 +495,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
                    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)) {
                 memcpy(daaddr, &sk->sk_daddr, 4);
         } else {
-                goto out2;
+                memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
         }
         /* get the l2t index */
@@ -458,10 +529,12 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
         if (ret)
                 goto out2;
+        atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
         return 0;
 out2:
         kvfree(tx_info);
 out:
+        atomic64_inc(&adap->chcr_stats.ktls_tx_connection_fail);
         return ret;
 }
@@ -729,6 +802,7 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
                                                  TCB_SND_UNA_RAW_V
                                                  (TCB_SND_UNA_RAW_M),
                                                  TCB_SND_UNA_RAW_V(0), 0);
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ooo);
                 cpl++;
         }
         /* update ack */
@@ -1152,6 +1226,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
         chcr_txq_advance(&q->q, ndesc);
         cxgb4_ring_tx_db(adap, &q->q, ndesc);
+        atomic64_inc(&adap->chcr_stats.ktls_tx_send_records);
         return 0;
 }
@@ -1562,6 +1637,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
         /* check if it is a complete record */
         if (tls_end_offset == record->len) {
                 nskb = skb;
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_complete_pkts);
         } else {
                 dev_kfree_skb_any(skb);
@@ -1579,6 +1655,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
                  */
                 if (chcr_ktls_update_snd_una(tx_info, q))
                         goto out;
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_end_pkts);
         }
         if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
@@ -1649,6 +1726,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
                 /* free the last trimmed portion */
                 dev_kfree_skb_any(skb);
                 skb = tmp_skb;
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_trimmed_pkts);
         }
         data_len = skb->data_len;
         /* check if the middle record's start point is 16 byte aligned. CTR
@@ -1720,6 +1798,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
                  */
                 if (chcr_ktls_update_snd_una(tx_info, q))
                         goto out;
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_middle_pkts);
         } else {
                 /* Else means, its a partial first part of the record. Check if
                  * its only the header, don't need to send for encryption then.
@@ -1734,6 +1813,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
                         }
                         return 0;
                 }
+                atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_start_pkts);
         }
         if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
@@ -1755,6 +1835,7 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
         struct tcphdr *th = tcp_hdr(skb);
         int data_len, qidx, ret = 0, mss;
         struct tls_record_info *record;
+        struct chcr_stats_debug *stats;
         struct chcr_ktls_info *tx_info;
         u32 tls_end_offset, tcp_seq;
         struct tls_context *tls_ctx;
@@ -1800,6 +1881,8 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                 return NETDEV_TX_BUSY;
         adap = tx_info->adap;
+        stats = &adap->chcr_stats;
         qidx = skb->queue_mapping;
         q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
         cxgb4_reclaim_completed_tx(adap, &q->q, true);
@@ -1829,6 +1912,7 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
          * part of the record is received. Incase of partial end part of record,
          * we will send the complete record again.
          */
         do {
                 int i;
@@ -1843,11 +1927,13 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
                  */
                 if (unlikely(!record)) {
                         spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+                        atomic64_inc(&stats->ktls_tx_drop_no_sync_data);
                         goto out;
                 }
                 if (unlikely(tls_record_is_start_marker(record))) {
                         spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+                        atomic64_inc(&stats->ktls_tx_skip_no_sync_data);
                         goto out;
                 }
@@ -1918,6 +2004,10 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
         } while (data_len > 0);
         tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
+        atomic64_inc(&stats->ktls_tx_encrypted_packets);
+        atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes);
         /* tcp finish is set, send a separate tcp msg including all the options
          * as well.
          */

File 2 of 4:

@@ -11,6 +11,7 @@
 #include "t4_tcb.h"
 #include "l2t.h"
 #include "chcr_common.h"
+#include "cxgb4_uld.h"
 #define CHCR_TCB_STATE_CLOSED 0
 #define CHCR_KTLS_KEY_CTX_LEN 16

File 3 of 4:

@@ -3409,6 +3409,41 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
                    atomic_read(&adap->chcr_stats.tls_pdu_rx));
         seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
                    atomic_read(&adap->chcr_stats.tls_key));
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+        seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
+        seq_printf(seq, "Tx HW offload contexts added: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_ctx));
+        seq_printf(seq, "Tx connection created: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_connection_open));
+        seq_printf(seq, "Tx connection failed: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_connection_fail));
+        seq_printf(seq, "Tx connection closed: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_connection_close));
+        seq_printf(seq, "Packets passed for encryption : %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_packets));
+        seq_printf(seq, "Bytes passed for encryption : %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_bytes));
+        seq_printf(seq, "Tx records send: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_send_records));
+        seq_printf(seq, "Tx partial start of records: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_start_pkts));
+        seq_printf(seq, "Tx partial middle of records: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_middle_pkts));
+        seq_printf(seq, "Tx partial end of record: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_end_pkts));
+        seq_printf(seq, "Tx complete records: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_complete_pkts));
+        seq_printf(seq, "TX trim pkts : %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_trimmed_pkts));
+        seq_printf(seq, "Tx out of order packets: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_ooo));
+        seq_printf(seq, "Tx drop pkts before HW offload: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_skip_no_sync_data));
+        seq_printf(seq, "Tx drop not synced packets: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_drop_no_sync_data));
+        seq_printf(seq, "Tx drop bypass req: %20llu\n",
+                   atomic64_read(&adap->chcr_stats.ktls_tx_drop_bypass_req));
+#endif
         return 0;
 }

File 4 of 4:

@@ -357,6 +357,26 @@ struct chcr_stats_debug {
         atomic_t tls_pdu_tx;
         atomic_t tls_pdu_rx;
         atomic_t tls_key;
+#ifdef CONFIG_CHELSIO_TLS_DEVICE
+        atomic64_t ktls_tx_connection_open;
+        atomic64_t ktls_tx_connection_fail;
+        atomic64_t ktls_tx_connection_close;
+        atomic64_t ktls_tx_send_records;
+        atomic64_t ktls_tx_end_pkts;
+        atomic64_t ktls_tx_start_pkts;
+        atomic64_t ktls_tx_middle_pkts;
+        atomic64_t ktls_tx_retransmit_pkts;
+        atomic64_t ktls_tx_complete_pkts;
+        atomic64_t ktls_tx_trimmed_pkts;
+        atomic64_t ktls_tx_encrypted_packets;
+        atomic64_t ktls_tx_encrypted_bytes;
+        atomic64_t ktls_tx_ctx;
+        atomic64_t ktls_tx_ooo;
+        atomic64_t ktls_tx_skip_no_sync_data;
+        atomic64_t ktls_tx_drop_no_sync_data;
+        atomic64_t ktls_tx_drop_bypass_req;
+#endif
 };
 #define OCQ_WIN_OFFSET(pdev, vres) \