Merge branch 'net-sk_err-lockless-annotate'

Eric Dumazet says:

====================
net: annotate lockless accesses to sk_err[_soft]

This patch series is inspired by yet another syzbot report.

Most poll() handlers are lockless and read sk->sk_err
while other CPUs can change it.

Add READ_ONCE()/WRITE_ONCE() annotations to the most common offenders.

More to come later.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2023-03-17 08:25:06 +00:00
commit ec4040ae5f
20 changed files with 63 additions and 56 deletions
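
For readers not familiar with the pattern applied below: sk->sk_err and
sk->sk_err_soft are plain int fields that error handlers update while
poll() reads them without holding the socket lock. READ_ONCE()/WRITE_ONCE()
mark those accesses so the compiler cannot tear, duplicate or re-load them,
and so KCSAN treats the race as intentional. The following is a minimal
userspace sketch of the idea, not kernel code; the volatile-cast macros only
mirror the essence of the kernel helpers, and the struct and thread names
are invented for illustration.

/* Userspace sketch of the READ_ONCE()/WRITE_ONCE() annotation pattern. */
#include <pthread.h>
#include <stdio.h>

#define WRITE_ONCE(x, val)  (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)        (*(volatile __typeof__(x) *)&(x))

struct fake_sock {
        int sk_err;             /* written by the error path, read by poll() */
};

static struct fake_sock s;

/* Plays the role of an ICMP error handler running on another CPU. */
static void *error_path(void *arg)
{
        WRITE_ONCE(s.sk_err, 104);      /* 104 == ECONNRESET */
        return NULL;
}

/* Plays the role of a lockless poll() handler. */
static void *poll_path(void *arg)
{
        int err = READ_ONCE(s.sk_err);  /* exactly one, untorn load */

        if (err)
                printf("would report EPOLLERR, sk_err=%d\n", err);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, error_path, NULL);
        pthread_create(&b, NULL, poll_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}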


@@ -601,7 +601,7 @@ static void lowcomms_error_report(struct sock *sk)
 "sk_err=%d/%d\n", dlm_our_nodeid(),
 con->nodeid, &inet->inet_daddr,
 ntohs(inet->inet_dport), sk->sk_err,
-sk->sk_err_soft);
+READ_ONCE(sk->sk_err_soft));
 break;
 #if IS_ENABLED(CONFIG_IPV6)
 case AF_INET6:
@@ -610,14 +610,15 @@ static void lowcomms_error_report(struct sock *sk)
 "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(),
 con->nodeid, &sk->sk_v6_daddr,
 ntohs(inet->inet_dport), sk->sk_err,
-sk->sk_err_soft);
+READ_ONCE(sk->sk_err_soft));
 break;
 #endif
 default:
 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
 "invalid socket family %d set, "
 "sk_err=%d/%d\n", dlm_our_nodeid(),
-sk->sk_family, sk->sk_err, sk->sk_err_soft);
+sk->sk_family, sk->sk_err,
+READ_ONCE(sk->sk_err_soft));
 break;
 }


@@ -125,7 +125,7 @@ as_indicate_complete:
 break;
 case as_addparty:
 case as_dropparty:
-sk->sk_err_soft = -msg->reply;
+WRITE_ONCE(sk->sk_err_soft, -msg->reply);
 /* < 0 failure, otherwise ep_ref */
 clear_bit(ATM_VF_WAITING, &vcc->flags);
 break;


@@ -177,7 +177,7 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
 * for the case, if this connection will not able to recover.
 */
 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
-sk->sk_err_soft = EMSGSIZE;
+WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);
 mtu = dst_mtu(dst);
@@ -339,8 +339,9 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
 sk_error_report(sk);
 dccp_done(sk);
-} else
-sk->sk_err_soft = err;
+} else {
+WRITE_ONCE(sk->sk_err_soft, err);
+}
 goto out;
 }
@@ -364,8 +365,9 @@ static int dccp_v4_err(struct sk_buff *skb, u32 info)
 if (!sock_owned_by_user(sk) && inet->recverr) {
 sk->sk_err = err;
 sk_error_report(sk);
-} else /* Only an error on timeout */
-sk->sk_err_soft = err;
+} else { /* Only an error on timeout */
+WRITE_ONCE(sk->sk_err_soft, err);
+}
 out:
 bh_unlock_sock(sk);
 sock_put(sk);


@@ -174,17 +174,18 @@ static int dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 */
 sk_error_report(sk);
 dccp_done(sk);
-} else
-sk->sk_err_soft = err;
+} else {
+WRITE_ONCE(sk->sk_err_soft, err);
+}
 goto out;
 }
 if (!sock_owned_by_user(sk) && np->recverr) {
 sk->sk_err = err;
 sk_error_report(sk);
-} else
-sk->sk_err_soft = err;
+} else {
+WRITE_ONCE(sk->sk_err_soft, err);
+}
 out:
 bh_unlock_sock(sk);
 sock_put(sk);


@@ -19,7 +19,7 @@ int sysctl_dccp_retries2 __read_mostly = TCP_RETR2;
 static void dccp_write_err(struct sock *sk)
 {
-sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
+sk->sk_err = READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT;
 sk_error_report(sk);
 dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
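
A side note on the "? :" form that dccp_write_err() keeps using (tcp_write_err()
below uses it too): it is the GNU C conditional-operator extension, which yields
the first operand when it is non-zero and otherwise the fallback, evaluating the
first operand only once. A tiny standalone illustration, with invented variable
names:

#include <stdio.h>

int main(void)
{
        int soft_err = 0;                  /* no soft error recorded */
        int hard_err = soft_err ? : 110;   /* 110 == ETIMEDOUT */

        printf("hard_err=%d\n", hard_err); /* prints 110 */
        return 0;
}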


@@ -1322,7 +1322,7 @@ int inet_sk_rebuild_header(struct sock *sk)
 sk->sk_state != TCP_SYN_SENT ||
 (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
 (err = inet_sk_reselect_saddr(sk)) != 0)
-sk->sk_err_soft = -err;
+WRITE_ONCE(sk->sk_err_soft, -err);
 }
 return err;


@@ -589,7 +589,8 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 }
 /* This barrier is coupled with smp_wmb() in tcp_reset() */
 smp_rmb();
-if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+if (READ_ONCE(sk->sk_err) ||
+    !skb_queue_empty_lockless(&sk->sk_error_queue))
 mask |= EPOLLERR;
 return mask;
@@ -3094,7 +3095,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 if (old_state == TCP_LISTEN) {
 inet_csk_listen_stop(sk);
 } else if (unlikely(tp->repair)) {
-sk->sk_err = ECONNABORTED;
+WRITE_ONCE(sk->sk_err, ECONNABORTED);
 } else if (tcp_need_reset(old_state) ||
 (tp->snd_nxt != tp->write_seq &&
 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
@@ -3102,9 +3103,9 @@ int tcp_disconnect(struct sock *sk, int flags)
 * states
 */
 tcp_send_active_reset(sk, gfp_any());
-sk->sk_err = ECONNRESET;
+WRITE_ONCE(sk->sk_err, ECONNRESET);
 } else if (old_state == TCP_SYN_SENT)
-sk->sk_err = ECONNRESET;
+WRITE_ONCE(sk->sk_err, ECONNRESET);
 tcp_clear_xmit_timers(sk);
 __skb_queue_purge(&sk->sk_receive_queue);
@@ -4692,7 +4693,7 @@ int tcp_abort(struct sock *sk, int err)
 bh_lock_sock(sk);
 if (!sock_flag(sk, SOCK_DEAD)) {
-sk->sk_err = err;
+WRITE_ONCE(sk->sk_err, err);
 /* This barrier is coupled with smp_rmb() in tcp_poll() */
 smp_wmb();
 sk_error_report(sk);


@@ -3874,7 +3874,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 /* We passed data and got it acked, remove any soft error
 * log. Something worked...
 */
-sk->sk_err_soft = 0;
+WRITE_ONCE(sk->sk_err_soft, 0);
 icsk->icsk_probes_out = 0;
 tp->rcv_tstamp = tcp_jiffies32;
 if (!prior_packets)
@@ -4322,15 +4322,15 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb)
 /* We want the right error as BSD sees it (and indeed as we do). */
 switch (sk->sk_state) {
 case TCP_SYN_SENT:
-sk->sk_err = ECONNREFUSED;
+WRITE_ONCE(sk->sk_err, ECONNREFUSED);
 break;
 case TCP_CLOSE_WAIT:
-sk->sk_err = EPIPE;
+WRITE_ONCE(sk->sk_err, EPIPE);
 break;
 case TCP_CLOSE:
 return;
 default:
-sk->sk_err = ECONNRESET;
+WRITE_ONCE(sk->sk_err, ECONNRESET);
 }
 /* This barrier is coupled with smp_rmb() in tcp_poll() */
 smp_wmb();
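
The comments kept intact above spell out the reasoning behind these
annotations: tcp_reset() and tcp_abort() store the error and then issue
smp_wmb() before the state change that the poller will test, while tcp_poll()
issues smp_rmb() before reading sk->sk_err, so a poller that observes the
later state is guaranteed to also observe the error value. Below is a rough
userspace analogue of that publish/observe pairing, using C11 release/acquire
atomics in place of the kernel barriers and invented names; it is a sketch,
not kernel code.

/* Sketch of the smp_wmb()/smp_rmb() pairing behind tcp_reset()/tcp_poll(). */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int sock_err;                /* plays the role of sk->sk_err     */
static atomic_int sock_done;        /* the state that the poller tests  */

static void *reset_path(void *arg)  /* plays the role of tcp_reset()    */
{
        sock_err = 104;             /* 104 == ECONNRESET */
        /* Release ordering publishes sock_err before sock_done,
         * the counterpart of smp_wmb() before reporting the error. */
        atomic_store_explicit(&sock_done, 1, memory_order_release);
        return NULL;
}

static void *poll_path(void *arg)   /* plays the role of tcp_poll()     */
{
        /* Acquire pairs with the release above, like smp_rmb(). */
        if (atomic_load_explicit(&sock_done, memory_order_acquire))
                printf("poll sees sk_err=%d\n", sock_err);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, reset_path, NULL);
        pthread_create(&b, NULL, poll_path, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}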


@@ -361,7 +361,7 @@ void tcp_v4_mtu_reduced(struct sock *sk)
 * for the case, if this connection will not able to recover.
 */
 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
-sk->sk_err_soft = EMSGSIZE;
+WRITE_ONCE(sk->sk_err_soft, EMSGSIZE);
 mtu = dst_mtu(dst);
@@ -596,13 +596,13 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
 ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);
 if (!sock_owned_by_user(sk)) {
-sk->sk_err = err;
+WRITE_ONCE(sk->sk_err, err);
 sk_error_report(sk);
 tcp_done(sk);
 } else {
-sk->sk_err_soft = err;
+WRITE_ONCE(sk->sk_err_soft, err);
 }
 goto out;
 }
@@ -625,10 +625,10 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
 inet = inet_sk(sk);
 if (!sock_owned_by_user(sk) && inet->recverr) {
-sk->sk_err = err;
+WRITE_ONCE(sk->sk_err, err);
 sk_error_report(sk);
 } else { /* Only an error on timeout */
-sk->sk_err_soft = err;
+WRITE_ONCE(sk->sk_err_soft, err);
 }
 out:


@@ -3699,7 +3699,7 @@ static void tcp_connect_init(struct sock *sk)
 tp->rx_opt.rcv_wscale = rcv_wscale;
 tp->rcv_ssthresh = tp->rcv_wnd;
-sk->sk_err = 0;
+WRITE_ONCE(sk->sk_err, 0);
 sock_reset_flag(sk, SOCK_DONE);
 tp->snd_wnd = 0;
 tcp_init_wl(tp, 0);


@@ -67,7 +67,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
 static void tcp_write_err(struct sock *sk)
 {
-sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
+WRITE_ONCE(sk->sk_err, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT);
 sk_error_report(sk);
 tcp_write_queue_purge(sk);
@@ -110,7 +110,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 shift++;
 /* If some dubious ICMP arrived, penalize even more. */
-if (sk->sk_err_soft)
+if (READ_ONCE(sk->sk_err_soft))
 shift++;
 if (tcp_check_oom(sk, shift)) {
@@ -146,7 +146,7 @@ static int tcp_orphan_retries(struct sock *sk, bool alive)
 int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */
 /* We know from an ICMP that something is wrong. */
-if (sk->sk_err_soft && !alive)
+if (READ_ONCE(sk->sk_err_soft) && !alive)
 retries = 0;
 /* However, if socket sent something recently, select some safe


@@ -845,7 +845,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
 dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
 if (IS_ERR(dst)) {
 sk->sk_route_caps = 0;
-sk->sk_err_soft = -PTR_ERR(dst);
+WRITE_ONCE(sk->sk_err_soft, -PTR_ERR(dst));
 return PTR_ERR(dst);
 }


@@ -120,7 +120,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
 dst = inet6_csk_route_socket(sk, &fl6);
 if (IS_ERR(dst)) {
-sk->sk_err_soft = -PTR_ERR(dst);
+WRITE_ONCE(sk->sk_err_soft, -PTR_ERR(dst));
 sk->sk_route_caps = 0;
 kfree_skb(skb);
 return PTR_ERR(dst);


@@ -493,12 +493,13 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
 if (!sock_owned_by_user(sk)) {
-sk->sk_err = err;
+WRITE_ONCE(sk->sk_err, err);
 sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
 tcp_done(sk);
-} else
-sk->sk_err_soft = err;
+} else {
+WRITE_ONCE(sk->sk_err_soft, err);
+}
 goto out;
 case TCP_LISTEN:
 break;
@@ -512,11 +513,11 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 }
 if (!sock_owned_by_user(sk) && np->recverr) {
-sk->sk_err = err;
+WRITE_ONCE(sk->sk_err, err);
 sk_error_report(sk);
-} else
-sk->sk_err_soft = err;
+} else {
+WRITE_ONCE(sk->sk_err_soft, err);
+}
 out:
 bh_unlock_sock(sk);
 sock_put(sk);


@@ -2019,7 +2019,7 @@ static int mptcp_event_put_token_and_ssk(struct sk_buff *skb,
 nla_put_s32(skb, MPTCP_ATTR_IF_IDX, ssk->sk_bound_dev_if))
 return -EMSGSIZE;
-sk_err = ssk->sk_err;
+sk_err = READ_ONCE(ssk->sk_err);
 if (sk_err && sk->sk_state == TCP_ESTABLISHED &&
 nla_put_u8(skb, MPTCP_ATTR_ERROR, sk_err))
 return -EMSGSIZE;


@@ -2463,15 +2463,15 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
 /* Mirror the tcp_reset() error propagation */
 switch (sk->sk_state) {
 case TCP_SYN_SENT:
-sk->sk_err = ECONNREFUSED;
+WRITE_ONCE(sk->sk_err, ECONNREFUSED);
 break;
 case TCP_CLOSE_WAIT:
-sk->sk_err = EPIPE;
+WRITE_ONCE(sk->sk_err, EPIPE);
 break;
 case TCP_CLOSE:
 return;
 default:
-sk->sk_err = ECONNRESET;
+WRITE_ONCE(sk->sk_err, ECONNRESET);
 }
 inet_sk_state_store(sk, TCP_CLOSE);
@@ -3791,7 +3791,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
 /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
 smp_rmb();
-if (sk->sk_err)
+if (READ_ONCE(sk->sk_err))
 mask |= EPOLLERR;
 return mask;


@@ -1335,7 +1335,7 @@ fallback:
 subflow->reset_reason = MPTCP_RST_EMPTCP;
 reset:
-ssk->sk_err = EBADMSG;
+WRITE_ONCE(ssk->sk_err, EBADMSG);
 tcp_set_state(ssk, TCP_CLOSE);
 while ((skb = skb_peek(&ssk->sk_receive_queue)))
 sk_eat_skb(ssk, skb);
@@ -1419,7 +1419,7 @@ void __mptcp_error_report(struct sock *sk)
 ssk_state = inet_sk_state_load(ssk);
 if (ssk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DEAD))
 inet_sk_state_store(sk, ssk_state);
-sk->sk_err = -err;
+WRITE_ONCE(sk->sk_err, -err);
 /* This barrier is coupled with smp_rmb() in mptcp_poll() */
 smp_wmb();


@@ -585,7 +585,7 @@ static void sctp_v4_err_handle(struct sctp_transport *t, struct sk_buff *skb,
 sk->sk_err = err;
 sk_error_report(sk);
 } else { /* Only an error on timeout */
-sk->sk_err_soft = err;
+WRITE_ONCE(sk->sk_err_soft, err);
 }
 }


@@ -155,7 +155,7 @@ static void sctp_v6_err_handle(struct sctp_transport *t, struct sk_buff *skb,
 sk->sk_err = err;
 sk_error_report(sk);
 } else {
-sk->sk_err_soft = err;
+WRITE_ONCE(sk->sk_err_soft, err);
 }
 }


@@ -557,7 +557,7 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
 * when peer was not connected to us.
 */
 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
-other->sk_err = ECONNRESET;
+WRITE_ONCE(other->sk_err, ECONNRESET);
 sk_error_report(other);
 }
 }
@@ -630,7 +630,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
 /* No more writes */
 skpair->sk_shutdown = SHUTDOWN_MASK;
 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
-skpair->sk_err = ECONNRESET;
+WRITE_ONCE(skpair->sk_err, ECONNRESET);
 unix_state_unlock(skpair);
 skpair->sk_state_change(skpair);
 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
@@ -3165,7 +3165,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
 mask = 0;
 /* exceptional events? */
-if (sk->sk_err)
+if (READ_ONCE(sk->sk_err))
 mask |= EPOLLERR;
 if (sk->sk_shutdown == SHUTDOWN_MASK)
 mask |= EPOLLHUP;
@@ -3208,7 +3208,8 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 mask = 0;
 /* exceptional events? */
-if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
+if (READ_ONCE(sk->sk_err) ||
+    !skb_queue_empty_lockless(&sk->sk_error_queue))
 mask |= EPOLLERR |
 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);