diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c3df3b55da97..47ab28a37f2f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1243,7 +1243,7 @@ static inline void consume_skb(struct sk_buff *skb)
 
 void __consume_stateless_skb(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
-extern struct kmem_cache *skbuff_head_cache;
+extern struct kmem_cache *skbuff_cache;
 
 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index e0b2d016f0bf..d2110c1f6fa6 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -361,7 +361,7 @@ static int cpu_map_kthread_run(void *data)
 		/* Support running another XDP prog on this CPU */
 		nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
 		if (nframes) {
-			m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
+			m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs);
 			if (unlikely(m == 0)) {
 				for (i = 0; i < nframes; i++)
 					skbs[i] = NULL; /* effect: xdp_return_frame */
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 8da0d73b368e..2b954326894f 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -234,7 +234,7 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
 	int i, n;
 	LIST_HEAD(list);
 
-	n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
+	n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
 	if (unlikely(n == 0)) {
 		for (i = 0; i < nframes; i++)
 			xdp_return_frame(frames[i]);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 70a6088e8326..13ea10cf8544 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -84,7 +84,7 @@
 #include "dev.h"
 #include "sock_destructor.h"
 
-struct kmem_cache *skbuff_head_cache __ro_after_init;
+struct kmem_cache *skbuff_cache __ro_after_init;
 static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
 #ifdef CONFIG_SKB_EXTENSIONS
 static struct kmem_cache *skbuff_ext_cache __ro_after_init;
@@ -285,7 +285,7 @@ static struct sk_buff *napi_skb_cache_get(void)
 	struct sk_buff *skb;
 
 	if (unlikely(!nc->skb_count)) {
-		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
+		nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
 						      GFP_ATOMIC,
 						      NAPI_SKB_CACHE_BULK,
 						      nc->skb_cache);
@@ -294,7 +294,7 @@ static struct sk_buff *napi_skb_cache_get(void)
 	}
 
 	skb = nc->skb_cache[--nc->skb_count];
-	kasan_unpoison_object_data(skbuff_head_cache, skb);
+	kasan_unpoison_object_data(skbuff_cache, skb);
 
 	return skb;
 }
@@ -352,7 +352,7 @@ struct sk_buff *slab_build_skb(void *data)
 	struct sk_buff *skb;
 	unsigned int size;
 
-	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return NULL;
 
@@ -403,7 +403,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 {
 	struct sk_buff *skb;
 
-	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return NULL;
 
@@ -585,7 +585,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	u8 *data;
 
 	cache = (flags & SKB_ALLOC_FCLONE)
-		? skbuff_fclone_cache : skbuff_head_cache;
+		? skbuff_fclone_cache : skbuff_cache;
 
 	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
 		gfp_mask |= __GFP_MEMALLOC;
@@ -921,7 +921,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 
 	switch (skb->fclone) {
 	case SKB_FCLONE_UNAVAILABLE:
-		kmem_cache_free(skbuff_head_cache, skb);
+		kmem_cache_free(skbuff_cache, skb);
 		return;
 
 	case SKB_FCLONE_ORIG:
@@ -1035,7 +1035,7 @@ static void kfree_skb_add_bulk(struct sk_buff *skb,
 	sa->skb_array[sa->skb_count++] = skb;
 
 	if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
-		kmem_cache_free_bulk(skbuff_head_cache, KFREE_SKB_BULK_SIZE,
+		kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE,
 				     sa->skb_array);
 		sa->skb_count = 0;
 	}
@@ -1060,8 +1060,7 @@ kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
 	}
 
 	if (sa.skb_count)
-		kmem_cache_free_bulk(skbuff_head_cache, sa.skb_count,
-				     sa.skb_array);
+		kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array);
 }
 EXPORT_SYMBOL(kfree_skb_list_reason);
 
@@ -1215,15 +1214,15 @@ static void napi_skb_cache_put(struct sk_buff *skb)
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 	u32 i;
 
-	kasan_poison_object_data(skbuff_head_cache, skb);
+	kasan_poison_object_data(skbuff_cache, skb);
 	nc->skb_cache[nc->skb_count++] = skb;
 
 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
 		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
-			kasan_unpoison_object_data(skbuff_head_cache,
+			kasan_unpoison_object_data(skbuff_cache,
 						   nc->skb_cache[i]);
 
-		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
+		kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
 				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
 		nc->skb_count = NAPI_SKB_CACHE_HALF;
 	}
@@ -1807,7 +1806,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
 
-		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+		n = kmem_cache_alloc(skbuff_cache, gfp_mask);
 		if (!n)
 			return NULL;
 
@@ -4677,7 +4676,7 @@ static void skb_extensions_init(void) {}
 
 void __init skb_init(void)
 {
-	skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
+	skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
 					      sizeof(struct sk_buff),
 					      0,
 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
@@ -5556,7 +5555,7 @@ void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
 {
 	if (head_stolen) {
 		skb_release_head_state(skb);
-		kmem_cache_free(skbuff_head_cache, skb);
+		kmem_cache_free(skbuff_cache, skb);
 	} else {
 		__kfree_skb(skb);
 	}
diff --git a/net/core/xdp.c b/net/core/xdp.c
index a5a7ecf6391c..03938fe6d33a 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -603,8 +603,7 @@ EXPORT_SYMBOL_GPL(xdp_warn);
 
 int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
 {
-	n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
-				      n_skb, skbs);
+	n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
 	if (unlikely(!n_skb))
 		return -ENOMEM;
 
@@ -673,7 +672,7 @@ struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
 {
 	struct sk_buff *skb;
 
-	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return NULL;
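---

Reviewer note, not part of the patch: the rename is purely mechanical, but several of the call sites it touches (cpumap, test_run, xdp_alloc_skb_bulk) rely on the bulk slab API, whose failure semantics are easy to misread. The sketch below is a minimal, self-contained module illustrating that pattern under assumed names: demo_cache and DEMO_OBJS are hypothetical stand-ins for skbuff_cache and the callers' batch sizes (skbuff_cache itself is not exported to modules, so it cannot be used directly here).

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical demo of the bulk slab alloc/free pattern used above. */
#include <linux/module.h>
#include <linux/slab.h>

#define DEMO_OBJS 8

static struct kmem_cache *demo_cache;	/* stand-in for skbuff_cache */

static int __init demo_init(void)
{
	void *objs[DEMO_OBJS];
	int n;

	demo_cache = kmem_cache_create("demo_cache", 256, 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!demo_cache)
		return -ENOMEM;

	/* Grab DEMO_OBJS objects in one call; the return value is the
	 * number of objects actually placed in objs[], 0 on failure.
	 */
	n = kmem_cache_alloc_bulk(demo_cache, GFP_KERNEL, DEMO_OBJS, objs);
	if (unlikely(n == 0)) {
		kmem_cache_destroy(demo_cache);
		return -ENOMEM;
	}

	pr_info("bulk-allocated %d objects\n", n);

	/* Return everything we took in a single call as well. */
	kmem_cache_free_bulk(demo_cache, n, objs);
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

As in the cpumap and xdp_alloc_skb_bulk() hunks above, callers treat a return of 0 as complete failure and fall back to per-object cleanup (xdp_return_frame() for each pending frame) rather than retrying the bulk call.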