diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 9db8bab27fa2..c2d585c4f7c5 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2343,6 +2343,11 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 	}
 #endif
 
+	/* Record the max length of recvmsg() calls for future allocations */
+	nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
+	nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
+				     16384);
+
 	copied = data_skb->len;
 	if (len < copied) {
 		msg->msg_flags |= MSG_TRUNC;
@@ -2587,7 +2592,27 @@ static int netlink_dump(struct sock *sk)
 	if (!netlink_rx_is_mmaped(sk) &&
 	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		goto errout_skb;
-	skb = netlink_alloc_skb(sk, alloc_size, nlk->portid, GFP_KERNEL);
+
+	/* NLMSG_GOODSIZE is small to avoid high order allocations being
+	 * required, but it makes sense to _attempt_ a 16K bytes allocation
+	 * to reduce number of system calls on dump operations, if user
+	 * ever provided a big enough buffer.
+	 */
+	if (alloc_size < nlk->max_recvmsg_len) {
+		skb = netlink_alloc_skb(sk,
+					nlk->max_recvmsg_len,
+					nlk->portid,
+					GFP_KERNEL |
+					__GFP_NOWARN |
+					__GFP_NORETRY);
+		/* available room should be exact amount to avoid MSG_TRUNC */
+		if (skb)
+			skb_reserve(skb, skb_tailroom(skb) -
+					 nlk->max_recvmsg_len);
+	}
+	if (!skb)
+		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
+					GFP_KERNEL);
 	if (!skb)
 		goto errout_skb;
 	netlink_skb_set_owner_r(skb, sk);
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index acbd774eeb7c..ed13a790b00e 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -31,6 +31,7 @@ struct netlink_sock {
	u32			ngroups;
	unsigned long		*groups;
	unsigned long		state;
+	size_t			max_recvmsg_len;
	wait_queue_head_t	wait;
	bool			cb_running;
	struct netlink_callback	cb;
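
For reference, below is a minimal user-space sketch (not part of the patch) of the usage pattern this change rewards: an rtnetlink RTM_GETLINK dump read through a 16 KiB buffer. On a patched kernel, netlink_recvmsg() records that buffer size in max_recvmsg_len, so subsequent netlink_dump() passes attempt one 16K skb instead of several NLMSG_GOODSIZE-sized ones, reducing the number of recv() system calls per dump. The choice of RTM_GETLINK and the exact buffer size are illustrative only.

/* Hypothetical example, not part of the patch: dump all network links
 * over NETLINK_ROUTE, reading replies through a 16 KiB buffer so a
 * patched kernel can record max_recvmsg_len and batch dump replies.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct rtgenmsg gen;
	} req = {
		.nlh.nlmsg_len    = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
		.nlh.nlmsg_type   = RTM_GETLINK,
		.nlh.nlmsg_flags  = NLM_F_REQUEST | NLM_F_DUMP,
		.gen.rtgen_family = AF_UNSPEC,
	};
	char buf[16384];	/* matches the patch's 16384-byte clamp */
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0 || send(fd, &req, req.nlh.nlmsg_len, 0) < 0)
		return 1;

	for (;;) {
		int len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (len <= 0)
			break;
		/* each recv() may now carry many messages in one dump skb */
		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type == NLMSG_DONE ||
			    nlh->nlmsg_type == NLMSG_ERROR) {
				close(fd);
				return 0;
			}
			printf("nlmsg type %u, len %u\n",
			       nlh->nlmsg_type, nlh->nlmsg_len);
		}
	}
	close(fd);
	return 0;
}

A buffer larger than 16 KiB gains nothing here, since the first hunk clamps max_recvmsg_len to 16384; a smaller buffer simply leaves the dump on the alloc_size fallback path.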