udp: introduce __sk_mem_schedule() usage

Keep the accounting schema consistent across different protocols by
using __sk_mem_schedule(). Besides, this slightly adjusts how the
forward allocated memory is calculated compared to before. After this
patch is applied, the receive path no longer schedules an extra
amount of memory.
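
For illustration, a minimal user-space sketch (not kernel code) of the
accounting difference: the old receive path rounded the whole skb size up to
pages before charging it to sk_forward_alloc, while the new path only rounds
up the shortfall that __sk_mem_schedule() is asked to cover. The skb_size and
fwd_alloc values below are made-up examples, and sk_mem_pages() is re-modelled
here just for the demo:

/*
 * Hypothetical demo only: compares how many bytes the old and new
 * logic would ask the memory accounting layer to schedule for one skb.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1 << PAGE_SHIFT)

/* mirrors sk_mem_pages(): round a byte count up to whole pages */
static int sk_mem_pages(int amt)
{
	return (amt + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	int skb_size = 5000;	/* truesize of the incoming skb (assumed) */
	int fwd_alloc = 2000;	/* credit already in sk_forward_alloc (assumed) */

	/* old: round the full skb size up to pages */
	int old_charge = sk_mem_pages(skb_size) << PAGE_SHIFT;

	/* new: only the shortfall is rounded up, via __sk_mem_schedule() */
	int delta = skb_size - fwd_alloc;
	int new_charge = delta > 0 ? sk_mem_pages(delta) << PAGE_SHIFT : 0;

	printf("old schedules %d bytes, new schedules %d bytes\n",
	       old_charge, new_charge);	/* 8192 vs 4096 here */
	return 0;
}

With these numbers the old logic schedules two pages where one is enough,
which is the extra receive-path scheduling this patch avoids.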

Link: https://lore.kernel.org/lkml/20230221110344.82818-1-kerneljasonxing@gmail.com/
Signed-off-by: Jason Xing <kernelxing@tencent.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20230308021153.99777-1-kerneljasonxing@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jason Xing 2023-03-08 10:11:53 +08:00 committed by Jakub Kicinski
parent d0928c1c5b
commit fd9c31f834
1 changed file with 16 additions and 11 deletions


@@ -1531,10 +1531,21 @@ static void busylock_release(spinlock_t *busy)
 		spin_unlock(busy);
 }
 
+static int udp_rmem_schedule(struct sock *sk, int size)
+{
+	int delta;
+
+	delta = size - sk->sk_forward_alloc;
+	if (delta > 0 && !__sk_mem_schedule(sk, delta, SK_MEM_RECV))
+		return -ENOBUFS;
+
+	return 0;
+}
+
 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct sk_buff_head *list = &sk->sk_receive_queue;
-	int rmem, delta, amt, err = -ENOMEM;
+	int rmem, err = -ENOMEM;
 	spinlock_t *busy = NULL;
 	int size;
@@ -1567,16 +1578,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 		goto uncharge_drop;
 
 	spin_lock(&list->lock);
-	if (size >= sk->sk_forward_alloc) {
-		amt = sk_mem_pages(size);
-		delta = amt << PAGE_SHIFT;
-		if (!__sk_mem_raise_allocated(sk, delta, amt, SK_MEM_RECV)) {
-			err = -ENOBUFS;
-			spin_unlock(&list->lock);
-			goto uncharge_drop;
-		}
-
-		sk->sk_forward_alloc += delta;
+	err = udp_rmem_schedule(sk, size);
+	if (err) {
+		spin_unlock(&list->lock);
+		goto uncharge_drop;
 	}
 
 	sk->sk_forward_alloc -= size;