trace: tcp: fully support trace_tcp_send_reset

Prior to this patch, the trace_tcp_send_reset tracepoint only fires
under two circumstances:
1) active rst mode
2) non-active rst mode, and only for a full socket

That means the two become inconsistent if we use tcpdump and the
tracepoint simultaneously to see how a rst happens.

We should take the other cases into consideration as well, say:
1) time-wait socket
2) no socket
...

By parsing the incoming skb and reversing its 4-tuple, we can know
the exact 'flow' of the rst even when the corresponding socket does
not exist.
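
For illustration only, here is a minimal userspace sketch of that
reversal (the ip4_hdr/tcp_hdr/rst_flow types and the reverse_tuple()
helper are made up for this example; the kernel path uses the
TP_STORE_ADDR_PORTS_SKB() call shown in the diff below):

/* Illustration only: not kernel code, all names are hypothetical. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct ip4_hdr  { uint32_t saddr, daddr; };	/* network byte order */
struct tcp_hdr  { uint16_t source, dest; };	/* network byte order */
struct rst_flow { struct in_addr saddr, daddr; uint16_t sport, dport; };

/*
 * The rst answers the offending segment, so its flow is the reverse
 * of that segment's 4-tuple: our source is their destination and
 * vice versa.
 */
static void reverse_tuple(const struct ip4_hdr *iph,
			  const struct tcp_hdr *th, struct rst_flow *rst)
{
	rst->saddr.s_addr = iph->daddr;
	rst->daddr.s_addr = iph->saddr;
	rst->sport = ntohs(th->dest);
	rst->dport = ntohs(th->source);
}

int main(void)
{
	struct ip4_hdr iph = { htonl(0xc0000201), htonl(0xc6336402) };
	struct tcp_hdr th = { htons(34567), htons(80) };
	struct rst_flow rst;
	char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];

	reverse_tuple(&iph, &th, &rst);
	inet_ntop(AF_INET, &rst.saddr, src, sizeof(src));
	inet_ntop(AF_INET, &rst.daddr, dst, sizeof(dst));
	/* prints src=198.51.100.2:80 dest=192.0.2.1:34567 */
	printf("src=%s:%u dest=%s:%u\n", src, (unsigned int)rst.sport,
	       dst, (unsigned int)rst.dport);
	return 0;
}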

Samples after applying this patch:
1. tcp_send_reset: skbaddr=XXX skaddr=XXX src=ip:port dest=ip:port
state=TCP_ESTABLISHED
2. tcp_send_reset: skbaddr=000...000 skaddr=XXX src=ip:port dest=ip:port
state=UNKNOWN
Note:
1) UNKNOWN means we cannot extract the right information from skb.
2) skbaddr/skaddr could be 0

Signed-off-by: Jason Xing <kernelxing@tencent.com>
Link: https://lore.kernel.org/r/20240401073605.37335-3-kerneljasonxing@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jason Xing 2024-04-01 15:36:05 +08:00 committed by Jakub Kicinski
parent 9807080e21
commit 19822a980e
3 changed files with 43 additions and 7 deletions

include/trace/events/tcp.h

@@ -78,11 +78,47 @@ DEFINE_EVENT(tcp_event_sk_skb, tcp_retransmit_skb,
  * skb of trace_tcp_send_reset is the skb that caused RST. In case of
  * active reset, skb should be NULL
  */
-DEFINE_EVENT(tcp_event_sk_skb, tcp_send_reset,
+TRACE_EVENT(tcp_send_reset,
 
 	TP_PROTO(const struct sock *sk, const struct sk_buff *skb),
 
-	TP_ARGS(sk, skb)
+	TP_ARGS(sk, skb),
+
+	TP_STRUCT__entry(
+		__field(const void *, skbaddr)
+		__field(const void *, skaddr)
+		__field(int, state)
+		__array(__u8, saddr, sizeof(struct sockaddr_in6))
+		__array(__u8, daddr, sizeof(struct sockaddr_in6))
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->skaddr = sk;
+		/* Zero means unknown state. */
+		__entry->state = sk ? sk->sk_state : 0;
+
+		memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+		memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+		if (sk && sk_fullsock(sk)) {
+			const struct inet_sock *inet = inet_sk(sk);
+
+			TP_STORE_ADDR_PORTS(__entry, inet, sk);
+		} else if (skb) {
+			const struct tcphdr *th = (const struct tcphdr *)skb->data;
+			/*
+			 * We should reverse the 4-tuple of skb, so later
+			 * it can print the right flow direction of rst.
+			 */
+			TP_STORE_ADDR_PORTS_SKB(skb, th, entry->daddr, entry->saddr);
+		}
+	),
+
+	TP_printk("skbaddr=%p skaddr=%p src=%pISpc dest=%pISpc state=%s",
+		  __entry->skbaddr, __entry->skaddr,
+		  __entry->saddr, __entry->daddr,
+		  __entry->state ? show_tcp_state_name(__entry->state) : "UNKNOWN")
 );
 
 /*

net/ipv4/tcp_ipv4.c

@@ -866,11 +866,10 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 	 * routing might fail in this case. No choice here, if we choose to force
 	 * input interface, we will misroute in case of asymmetric route.
 	 */
-	if (sk) {
+	if (sk)
 		arg.bound_dev_if = sk->sk_bound_dev_if;
-		if (sk_fullsock(sk))
-			trace_tcp_send_reset(sk, skb);
-	}
+
+	trace_tcp_send_reset(sk, skb);
 
 	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
 		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

net/ipv6/tcp_ipv6.c

@@ -1113,7 +1113,6 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
 	if (sk) {
 		oif = sk->sk_bound_dev_if;
 		if (sk_fullsock(sk)) {
-			trace_tcp_send_reset(sk, skb);
 			if (inet6_test_bit(REPFLOW, sk))
 				label = ip6_flowlabel(ipv6h);
 			priority = READ_ONCE(sk->sk_priority);
@@ -1129,6 +1128,8 @@
 			label = ip6_flowlabel(ipv6h);
 	}
 
+	trace_tcp_send_reset(sk, skb);
+
 	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, 1,
 			     ipv6_get_dsfield(ipv6h), label, priority, txhash,
 			     &key);