mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-29 22:02:02 +00:00
tcp: add exponential backoff in __tcp_send_ack()
Whenever a host is under very high memory pressure, the skb allocation in __tcp_send_ack() fails, and we set up a 200 ms (TCP_DELACK_MAX) timer before retrying. On hosts with a high number of TCP sockets, we can spend a considerable amount of CPU cycles in these attempts, adding high pressure on various spinlocks in the mm layer and ultimately blocking threads that are attempting to free space from making any progress. This patch adds standard exponential backoff to avoid adding fuel to the fire. Signed-off-by: Eric Dumazet <edumazet@google.com> Acked-by: Soheil Hassas Yeganeh <soheil@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
b6b6d6533a
commit
a37c2134be
2 changed files with 10 additions and 4 deletions
|
@@ -110,7 +110,7 @@ struct inet_connection_sock {
|
||||||
__u8 pending; /* ACK is pending */
|
__u8 pending; /* ACK is pending */
|
||||||
__u8 quick; /* Scheduled number of quick acks */
|
__u8 quick; /* Scheduled number of quick acks */
|
||||||
__u8 pingpong; /* The session is interactive */
|
__u8 pingpong; /* The session is interactive */
|
||||||
/* one byte hole. */
|
__u8 retry; /* Number of attempts */
|
||||||
__u32 ato; /* Predicted tick of soft clock */
|
__u32 ato; /* Predicted tick of soft clock */
|
||||||
unsigned long timeout; /* Currently scheduled timeout */
|
unsigned long timeout; /* Currently scheduled timeout */
|
||||||
__u32 lrcvtime; /* timestamp of last received data packet */
|
__u32 lrcvtime; /* timestamp of last received data packet */
|
||||||
|
@@ -199,6 +199,7 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
|
||||||
#endif
|
#endif
|
||||||
} else if (what == ICSK_TIME_DACK) {
|
} else if (what == ICSK_TIME_DACK) {
|
||||||
icsk->icsk_ack.pending = 0;
|
icsk->icsk_ack.pending = 0;
|
||||||
|
icsk->icsk_ack.retry = 0;
|
||||||
#ifdef INET_CSK_CLEAR_TIMERS
|
#ifdef INET_CSK_CLEAR_TIMERS
|
||||||
sk_stop_timer(sk, &icsk->icsk_delack_timer);
|
sk_stop_timer(sk, &icsk->icsk_delack_timer);
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@@ -3941,10 +3941,15 @@ void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
|
||||||
buff = alloc_skb(MAX_TCP_HEADER,
|
buff = alloc_skb(MAX_TCP_HEADER,
|
||||||
sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
|
sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
|
||||||
if (unlikely(!buff)) {
|
if (unlikely(!buff)) {
|
||||||
|
struct inet_connection_sock *icsk = inet_csk(sk);
|
||||||
|
unsigned long delay;
|
||||||
|
|
||||||
|
delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
|
||||||
|
if (delay < TCP_RTO_MAX)
|
||||||
|
icsk->icsk_ack.retry++;
|
||||||
inet_csk_schedule_ack(sk);
|
inet_csk_schedule_ack(sk);
|
||||||
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
|
icsk->icsk_ack.ato = TCP_ATO_MIN;
|
||||||
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
|
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
|
||||||
TCP_DELACK_MAX, TCP_RTO_MAX);
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue