net: skbuff: drop the word head from skb cache

skbuff_head_cache is misnamed (perhaps for historical reasons?)
because it does not hold heads. The head is the buffer that
skb->data points into, and it is also where shinfo lives.
struct sk_buff is a metadata structure, not the head.
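
To make the split concrete, here is a minimal userspace sketch of the
two allocations described above. The types and layout are stand-ins
(struct shared_info, the fixed 2048-byte size, and the field set are
all simplified for illustration; this is not the kernel's real
struct sk_buff):

  #include <stdio.h>
  #include <stdlib.h>

  struct shared_info { int nr_frags; };  /* stands in for skb_shared_info */

  struct sk_buff {              /* metadata only; this is what the skb
                                 * slab cache holds */
      unsigned char *head;      /* start of the head buffer */
      unsigned char *data;      /* payload pointer within the head */
      unsigned int end;         /* offset of shared_info within the head */
  };

  int main(void)
  {
      struct sk_buff skb;       /* the kernel takes this from the skb cache */
      unsigned int size = 2048;
      struct shared_info *shinfo;

      /* the head is a separate allocation... */
      skb.head = malloc(size + sizeof(struct shared_info));
      if (!skb.head)
          return 1;
      skb.data = skb.head;      /* ...skb->data points into it... */
      skb.end = size;           /* ...and shinfo sits past the data area */

      shinfo = (struct shared_info *)(skb.head + skb.end);
      shinfo->nr_frags = 0;

      printf("metadata %p, head %p, shinfo %p\n",
             (void *)&skb, (void *)skb.head, (void *)shinfo);
      free(skb.head);
      return 0;
  }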

Eric recently added skb_small_head_cache (which allocates actual
head buffers); let that serve as an excuse to finally clean this up :)

Leave the user-space visible name intact; it could possibly be uAPI.
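
Since the string passed to kmem_cache_create_usercopy() stays
"skbuff_head_cache", the name visible in /proc/slabinfo should not
change. A quick sketch to check that after the rename (an illustrative
userspace helper, not part of this patch; reading /proc/slabinfo
typically requires root):

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
      char line[512];
      FILE *f = fopen("/proc/slabinfo", "r");

      if (!f) {
          perror("/proc/slabinfo");
          return 1;
      }
      /* print the entry for the skb metadata cache; the user-visible
       * slab name is deliberately unchanged by this commit */
      while (fgets(line, sizeof(line), f))
          if (!strncmp(line, "skbuff_head_cache", 17))
              fputs(line, stdout);
      fclose(f);
      return 0;
  }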

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 025a785ff0 (parent 01a7ee36e7)
Author:    Jakub Kicinski, 2023-02-08 22:06:42 -08:00
Committer: David S. Miller
5 changed files with 20 additions and 22 deletions

include/linux/skbuff.h

@@ -1243,7 +1243,7 @@ static inline void consume_skb(struct sk_buff *skb)
 void __consume_stateless_skb(struct sk_buff *skb);
 void __kfree_skb(struct sk_buff *skb);
-extern struct kmem_cache *skbuff_head_cache;
+extern struct kmem_cache *skbuff_cache;
 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,

kernel/bpf/cpumap.c

@@ -361,7 +361,7 @@ static int cpu_map_kthread_run(void *data)
 		/* Support running another XDP prog on this CPU */
 		nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
 		if (nframes) {
-			m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
+			m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs);
 			if (unlikely(m == 0)) {
 				for (i = 0; i < nframes; i++)
 					skbs[i] = NULL; /* effect: xdp_return_frame */

net/bpf/test_run.c

@@ -234,7 +234,7 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
 	int i, n;
 	LIST_HEAD(list);
-	n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
+	n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
 	if (unlikely(n == 0)) {
 		for (i = 0; i < nframes; i++)
 			xdp_return_frame(frames[i]);

net/core/skbuff.c

@@ -84,7 +84,7 @@
 #include "dev.h"
 #include "sock_destructor.h"
-struct kmem_cache *skbuff_head_cache __ro_after_init;
+struct kmem_cache *skbuff_cache __ro_after_init;
 static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
 #ifdef CONFIG_SKB_EXTENSIONS
 static struct kmem_cache *skbuff_ext_cache __ro_after_init;
@@ -285,7 +285,7 @@ static struct sk_buff *napi_skb_cache_get(void)
 	struct sk_buff *skb;
 	if (unlikely(!nc->skb_count)) {
-		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
+		nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
 						      GFP_ATOMIC,
 						      NAPI_SKB_CACHE_BULK,
 						      nc->skb_cache);
@@ -294,7 +294,7 @@ static struct sk_buff *napi_skb_cache_get(void)
 	}
 	skb = nc->skb_cache[--nc->skb_count];
-	kasan_unpoison_object_data(skbuff_head_cache, skb);
+	kasan_unpoison_object_data(skbuff_cache, skb);
 	return skb;
 }
@@ -352,7 +352,7 @@ struct sk_buff *slab_build_skb(void *data)
 	struct sk_buff *skb;
 	unsigned int size;
-	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return NULL;
@@ -403,7 +403,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 {
 	struct sk_buff *skb;
-	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return NULL;
@@ -585,7 +585,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	u8 *data;
 	cache = (flags & SKB_ALLOC_FCLONE)
-		? skbuff_fclone_cache : skbuff_head_cache;
+		? skbuff_fclone_cache : skbuff_cache;
 	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
 		gfp_mask |= __GFP_MEMALLOC;
@@ -921,7 +921,7 @@ static void kfree_skbmem(struct sk_buff *skb)
 	switch (skb->fclone) {
 	case SKB_FCLONE_UNAVAILABLE:
-		kmem_cache_free(skbuff_head_cache, skb);
+		kmem_cache_free(skbuff_cache, skb);
 		return;
 	case SKB_FCLONE_ORIG:
@@ -1035,7 +1035,7 @@ static void kfree_skb_add_bulk(struct sk_buff *skb,
 	sa->skb_array[sa->skb_count++] = skb;
 	if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
-		kmem_cache_free_bulk(skbuff_head_cache, KFREE_SKB_BULK_SIZE,
+		kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE,
 				     sa->skb_array);
 		sa->skb_count = 0;
 	}
@@ -1060,8 +1060,7 @@ kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
 	}
 	if (sa.skb_count)
-		kmem_cache_free_bulk(skbuff_head_cache, sa.skb_count,
-				     sa.skb_array);
+		kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array);
 }
 EXPORT_SYMBOL(kfree_skb_list_reason);
@@ -1215,15 +1214,15 @@ static void napi_skb_cache_put(struct sk_buff *skb)
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 	u32 i;
-	kasan_poison_object_data(skbuff_head_cache, skb);
+	kasan_poison_object_data(skbuff_cache, skb);
 	nc->skb_cache[nc->skb_count++] = skb;
 	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
 		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
-			kasan_unpoison_object_data(skbuff_head_cache,
+			kasan_unpoison_object_data(skbuff_cache,
 						   nc->skb_cache[i]);
-		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
+		kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
 				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
 		nc->skb_count = NAPI_SKB_CACHE_HALF;
 	}
@@ -1807,7 +1806,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 		if (skb_pfmemalloc(skb))
 			gfp_mask |= __GFP_MEMALLOC;
-		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+		n = kmem_cache_alloc(skbuff_cache, gfp_mask);
 		if (!n)
 			return NULL;
@@ -4677,7 +4676,7 @@ static void skb_extensions_init(void) {}
 void __init skb_init(void)
 {
-	skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
+	skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
 					      sizeof(struct sk_buff),
 					      0,
 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
@@ -5556,7 +5555,7 @@ void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
 {
 	if (head_stolen) {
 		skb_release_head_state(skb);
-		kmem_cache_free(skbuff_head_cache, skb);
+		kmem_cache_free(skbuff_cache, skb);
 	} else {
 		__kfree_skb(skb);
 	}

net/core/xdp.c

@@ -603,8 +603,7 @@ EXPORT_SYMBOL_GPL(xdp_warn);
 int xdp_alloc_skb_bulk(void **skbs, int n_skb, gfp_t gfp)
 {
-	n_skb = kmem_cache_alloc_bulk(skbuff_head_cache, gfp,
-				      n_skb, skbs);
+	n_skb = kmem_cache_alloc_bulk(skbuff_cache, gfp, n_skb, skbs);
 	if (unlikely(!n_skb))
 		return -ENOMEM;
@@ -673,7 +672,7 @@ struct sk_buff *xdp_build_skb_from_frame(struct xdp_frame *xdpf,
 {
 	struct sk_buff *skb;
-	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
+	skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
 	if (unlikely(!skb))
 		return NULL;