mm/slab_common: kmalloc_node: pass large requests to page allocator

Now that kmalloc_large_node() is in common code, pass large requests
to the page allocator in kmalloc_node(), using kmalloc_large_node().

One problem is that kmalloc_large_node() currently has no tracepoint.
Instead of simply adding one inside it, provide
kmalloc_large_node{,_notrace}() and pick the variant per caller, so
that the reported caller address stays useful both for the inlined
kmalloc_node() and for __kmalloc_node_track_caller() when large
objects are allocated.

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
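
The split described above is the usual "untraced worker plus traced
wrapper" pattern. Below is a minimal userspace C sketch of it, not the
kernel code: the names (large_alloc*) are made up and printf() merely
stands in for trace_kmalloc_node().

	/*
	 * Userspace sketch, illustrative names only.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	/* Untraced worker: allocates, emits no event. */
	static void *large_alloc_notrace(size_t size)
	{
		return malloc(size); /* stands in for the page allocator */
	}

	/* Traced wrapper: ordinary callers land here. */
	static void *large_alloc(size_t size)
	{
		void *ret = large_alloc_notrace(size);

		/* stands in for trace_kmalloc_node(_RET_IP_, ...) */
		printf("alloc(%zu) from %p -> %p\n",
		       size, __builtin_return_address(0), ret);
		return ret;
	}

	/*
	 * A *_track_caller-style path: uses the untraced worker and
	 * reports the address handed in by its own caller instead.
	 */
	static void *large_alloc_track(size_t size, void *caller)
	{
		void *ret = large_alloc_notrace(size);

		printf("alloc(%zu) from %p -> %p\n", size, caller, ret);
		return ret;
	}

	int main(void)
	{
		free(large_alloc(1 << 20));
		free(large_alloc_track(1 << 20, __builtin_return_address(0)));
		return 0;
	}

The wrapper logs its own return address, which is what inlined
kmalloc_node() callers want; a *_track_caller-style path calls the
untraced worker so it can log the address its caller handed in.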

diff --git a/include/linux/slab.h b/include/linux/slab.h

@@ -571,23 +571,35 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 	return __kmalloc(size, flags);
 }
 
+#ifndef CONFIG_SLOB
 static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-#ifndef CONFIG_SLOB
-	if (__builtin_constant_p(size) &&
-		size <= KMALLOC_MAX_CACHE_SIZE) {
-		unsigned int i = kmalloc_index(size);
+	if (__builtin_constant_p(size)) {
+		unsigned int index;
 
-		if (!i)
+		if (size > KMALLOC_MAX_CACHE_SIZE)
+			return kmalloc_large_node(size, flags, node);
+
+		index = kmalloc_index(size);
+
+		if (!index)
 			return ZERO_SIZE_PTR;
 
 		return kmem_cache_alloc_node_trace(
-				kmalloc_caches[kmalloc_type(flags)][i],
+				kmalloc_caches[kmalloc_type(flags)][index],
 				flags, node, size);
 	}
-#endif
 	return __kmalloc_node(size, flags, node);
 }
+#else
+static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
+		return kmalloc_large_node(size, flags, node);
+
+	return __kmalloc_node(size, flags, node);
+}
+#endif
 
 /**
  * kmalloc_array - allocate memory for an array.
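
Because kmalloc_node() is __always_inline, the whole dispatch above
folds away for compile-time-constant sizes: a constant size above
KMALLOC_MAX_CACHE_SIZE compiles straight to a kmalloc_large_node()
call. A userspace sketch of the same pattern (MAX_CACHE and both
helpers are stand-ins, not kernel API):

	#include <stdlib.h>

	#define MAX_CACHE (8 * 1024)	/* stand-in for KMALLOC_MAX_CACHE_SIZE */

	static void *large_alloc(size_t size) { return malloc(size); }
	static void *cache_alloc(size_t size) { return malloc(size); }

	static inline __attribute__((always_inline))
	void *my_alloc(size_t size)
	{
		if (__builtin_constant_p(size)) {
			/* branch is folded at compile time for constant sizes */
			if (size > MAX_CACHE)
				return large_alloc(size);
		}
		return cache_alloc(size); /* runtime-sized fallback */
	}

	int main(void)
	{
		/* constant and large: compiles directly to large_alloc() */
		void *p = my_alloc(64 * 1024);

		free(p);
		return 0;
	}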

diff --git a/mm/slab.h b/mm/slab.h

@@ -275,6 +275,8 @@ void create_kmalloc_caches(slab_flags_t);
 struct kmem_cache *kmalloc_slab(size_t, gfp_t);
 #endif
 
+void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node);
+
 gfp_t kmalloc_fix_flags(gfp_t flags);
 
 /* Functions provided by the slab allocators */

diff --git a/mm/slab_common.c b/mm/slab_common.c

@@ -928,7 +928,7 @@ void *kmalloc_large(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(kmalloc_large);
 
-void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+void *kmalloc_large_node_notrace(size_t size, gfp_t flags, int node)
 {
 	struct page *page;
 	void *ptr = NULL;
@@ -948,6 +948,15 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
+void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+{
+	void *ret = kmalloc_large_node_notrace(size, flags, node);
+
+	trace_kmalloc_node(_RET_IP_, ret, NULL, size,
+			   PAGE_SIZE << get_order(size), flags, node);
+	return ret;
+}
+EXPORT_SYMBOL(kmalloc_large_node);
+
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
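
The body elided between the two hunks above backs the allocation with
whole pages from the page allocator rather than a kmalloc size-class
cache. As a rough userspace analogue only, with mmap() standing in
for alloc_pages_node() (none of this is the kernel implementation):

	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static void *alloc_whole_pages(size_t size)
	{
		size_t page = (size_t)sysconf(_SC_PAGESIZE);
		size_t rounded = (size + page - 1) & ~(page - 1);
		void *p = mmap(NULL, rounded, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		return (p == MAP_FAILED) ? NULL : p;
	}

	int main(void)
	{
		void *p = alloc_whole_pages(64 * 1024);

		printf("64KiB at %p\n", p);
		if (p)
			munmap(p, 64 * 1024);
		return 0;
	}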

diff --git a/mm/slub.c b/mm/slub.c

@@ -4401,7 +4401,7 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	void *ret;
 
 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
-		ret = kmalloc_large_node(size, flags, node);
+		ret = kmalloc_large_node_notrace(size, flags, node);
 
 		trace_kmalloc_node(caller, ret, NULL,
 				   size, PAGE_SIZE << get_order(size),