tcp: preserve const qualifier in tcp_sk()

We can change tcp_sk() to propagate the const qualifier of its
argument, thanks to container_of_const().
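
For context, container_of_const() relies on a C11 _Generic selection so
that the type of the result follows the const qualifier of the argument.
A rough sketch of the macro (paraphrased from include/linux/container_of.h,
exact details may differ):

	#define container_of_const(ptr, type, member)				\
		_Generic(ptr,							\
			const typeof(*(ptr)) *: ((const type *)container_of(ptr, type, member)), \
			default: ((type *)container_of(ptr, type, member))	\
		)

With tcp_sk() built on top of this, passing a const struct sock * yields
a const struct tcp_sock *, so stray writes through a lockless listener no
longer compile.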

We have two places where a const sock pointer has to be upgraded
to a writable one. We have been using the const qualifier for lockless
listeners to clearly identify points where writes could happen.

Add tcp_sk_rw() helper to better document these.
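
As an illustration only (hypothetical function, kernel context assumed),
the intended split between the two helpers looks like this:

	static u32 listener_example(const struct sock *sk)
	{
		const struct tcp_sock *tp = tcp_sk(sk);	/* const in, const out */

		/* tp->total_retrans++;  <- would no longer compile, tp is const */

		/* An intentional write must go through tcp_sk_rw(), keeping the
		 * few places where a lockless listener is modified easy to audit.
		 */
		tcp_sk_rw(sk)->total_retrans++;

		return tp->total_retrans;	/* plain reads are unchanged */
	}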

tcp_inbound_md5_hash(), __tcp_grow_window(), tcp_reset_check()
and tcp_rack_reo_wnd() get an additional const qualifier
for their @tp local variables.

smc_check_reset_syn_req() also needs a similar change.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit e9d9da9154 (parent 403a40f230)
Eric Dumazet, 2023-03-17 15:55:39 +00:00, committed by David S. Miller
7 changed files with 21 additions and 13 deletions

include/linux/tcp.h

@@ -472,10 +472,12 @@ enum tsq_flags {
 	TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED),
 };
 
-static inline struct tcp_sock *tcp_sk(const struct sock *sk)
-{
-	return (struct tcp_sock *)sk;
-}
+#define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
+
+/* Variant of tcp_sk() upgrading a const sock to a read/write tcp socket.
+ * Used in context of (lockless) tcp listeners.
+ */
+#define tcp_sk_rw(ptr) container_of(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
 
 struct tcp_timewait_sock {
 	struct inet_timewait_sock tw_sk;

include/net/tcp.h

@@ -529,7 +529,7 @@ static inline void tcp_synq_overflow(const struct sock *sk)
 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
 	if (!time_between32(now, last_overflow, last_overflow + HZ))
-		WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
+		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
 }
 
 /* syncookies: no recent synqueue overflow on this listening socket? */

net/ipv4/tcp.c

@@ -4570,7 +4570,7 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
 	const __u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
 	const struct tcphdr *th = tcp_hdr(skb);
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	int genhash, l3index;
 	u8 newhash[16];

net/ipv4/tcp_input.c

@@ -458,7 +458,7 @@ static void tcp_sndbuf_expand(struct sock *sk)
 static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb,
 			     unsigned int skbtruesize)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 	/* Optimize this! */
 	int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
 	int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;
@@ -5693,7 +5693,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
  */
 static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
 			(1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |

net/ipv4/tcp_minisocks.c

@@ -463,7 +463,7 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
 }
 EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
 
-static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
+static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
 				    struct request_sock *req,
 				    struct tcp_sock *newtp)
 {
@@ -492,7 +492,8 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 	const struct inet_request_sock *ireq = inet_rsk(req);
 	struct tcp_request_sock *treq = tcp_rsk(req);
 	struct inet_connection_sock *newicsk;
-	struct tcp_sock *oldtp, *newtp;
+	const struct tcp_sock *oldtp;
+	struct tcp_sock *newtp;
 	u32 seq;
 
 	if (!newsk)

net/ipv4/tcp_output.c

@@ -4127,8 +4127,13 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 	if (!res) {
 		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-		if (unlikely(tcp_passive_fastopen(sk)))
-			tcp_sk(sk)->total_retrans++;
+		if (unlikely(tcp_passive_fastopen(sk))) {
+			/* sk has const attribute because listeners are lockless.
+			 * However in this case, we are dealing with a passive fastopen
+			 * socket thus we can change total_retrans value.
+			 */
+			tcp_sk_rw(sk)->total_retrans++;
+		}
 		trace_tcp_retransmit_synack(sk, req);
 	}
 	return res;

net/ipv4/tcp_recovery.c

@@ -4,7 +4,7 @@
 static u32 tcp_rack_reo_wnd(const struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	if (!tp->reord_seen) {
 		/* If reordering has not been observed, be aggressive during