Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
mm, slub: don't aggressively inline with CONFIG_SLUB_TINY
SLUB fastpaths use __always_inline to avoid function calls. With
CONFIG_SLUB_TINY we would rather save the memory. Add a __fastpath_inline
macro that's __always_inline normally but empty with CONFIG_SLUB_TINY.

bloat-o-meter results on x86_64 mm/slub.o:

add/remove: 3/1 grow/shrink: 1/8 up/down: 865/-1784 (-919)
Function                       old     new   delta
kmem_cache_free                 20     281    +261
slab_alloc_node.isra             -     245    +245
slab_free.constprop.isra         -     231    +231
__kmem_cache_alloc_lru.isra      -     128    +128
__kmem_cache_release            88      83      -5
__kmem_cache_create           1446    1436     -10
__kmem_cache_free              271     142    -129
kmem_cache_alloc_node          330     127    -203
kmem_cache_free_bulk.part      826     613    -213
__kmem_cache_alloc_node        230      10    -220
kmem_cache_alloc_lru           325      12    -313
kmem_cache_alloc               325      10    -315
kmem_cache_free.part           376       -    -376
Total: Before=26103, After=25184, chg -3.52%

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mike Rapoport <rppt@linux.ibm.com>
Reviewed-by: Christoph Lameter <cl@linux.com>
Acked-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
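For reference, the pattern in isolation: a config-dependent inline hint that
is a hard __always_inline when speed is preferred and expands to nothing when
size is. A minimal compilable sketch, not kernel code: the macro body matches
the hunk in the diff below, while TINY and fastpath_add() are hypothetical
stand-ins for CONFIG_SLUB_TINY and the SLUB fastpath functions.

/* fastpath_demo.c - sketch of the __fastpath_inline pattern.
 * Hypothetical demo: TINY stands in for CONFIG_SLUB_TINY and
 * fastpath_add() for a SLUB fastpath such as slab_alloc_node().
 */
#include <stdio.h>

/* Userspace spelling of the kernel's __always_inline. */
#define __always_inline inline __attribute__((__always_inline__))

#ifndef TINY
#define __fastpath_inline __always_inline	/* speed: force inlining */
#else
#define __fastpath_inline			/* size: let the compiler decide */
#endif

static __fastpath_inline int fastpath_add(int a, int b)
{
	return a + b;
}

int main(void)
{
	printf("%d\n", fastpath_add(2, 3));
	return 0;
}

Built as `gcc -Os -c fastpath_demo.c` the helper is forcibly inlined; built
with -DTINY it is an ordinary static function the compiler may emit once and
call, trading a call instruction for smaller text.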
parent 0af8489b02
commit be784ba861

 mm/slub.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
@@ -187,6 +187,12 @@ do { \
 #define USE_LOCKLESS_FAST_PATH()	(false)
 #endif
 
+#ifndef CONFIG_SLUB_TINY
+#define __fastpath_inline __always_inline
+#else
+#define __fastpath_inline
+#endif
+
 #ifdef CONFIG_SLUB_DEBUG
 #ifdef CONFIG_SLUB_DEBUG_ON
 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
@@ -3386,7 +3392,7 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
  *
  * Otherwise we can simply pick the next object from the lockless free list.
  */
-static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
+static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
 		gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
 {
 	void *object;
@@ -3412,13 +3418,13 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s, struct list_l
 	return object;
 }
 
-static __always_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
+static __fastpath_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru,
 		gfp_t gfpflags, unsigned long addr, size_t orig_size)
 {
 	return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size);
 }
 
-static __always_inline
+static __fastpath_inline
 void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 		gfp_t gfpflags)
 {
@@ -3733,7 +3739,7 @@ static void do_slab_free(struct kmem_cache *s,
 }
 #endif /* CONFIG_SLUB_TINY */
 
-static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
+static __fastpath_inline void slab_free(struct kmem_cache *s, struct slab *slab,
 				void *head, void *tail, void **p, int cnt,
 				unsigned long addr)
 {
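The direction of the bloat-o-meter deltas follows from body duplication: a
forced-inline helper is copied into every caller, while the empty-macro build
can share one out-of-line copy. A hedged sketch under the same assumptions as
above (TINY, the file name, and the flags are illustrative, not from the
commit); the two objects can be compared with the kernel's
scripts/bloat-o-meter or plain `size`.

/* size_demo.c - why dropping __always_inline shrinks the object file.
 * Hypothetical demo; compare the two builds, e.g.:
 *   gcc -Os -c size_demo.c -o fast.o
 *   gcc -Os -DTINY -c size_demo.c -o tiny.o
 *   ./scripts/bloat-o-meter fast.o tiny.o
 */
#define __always_inline inline __attribute__((__always_inline__))

#ifndef TINY
#define __fastpath_inline __always_inline
#else
#define __fastpath_inline
#endif

/* A helper meaty enough that, at -Os and without the forced hint,
 * the compiler will typically keep a single out-of-line copy. */
static __fastpath_inline unsigned long mix(unsigned long x)
{
	x ^= x >> 33; x *= 0xff51afd7ed558ccdUL;
	x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53UL;
	x ^= x >> 33;
	return x;
}

/* Three independent callers: with __always_inline each gets its own
 * copy of mix(); with TINY they can share one. */
unsigned long hash_a(unsigned long v) { return mix(v) + 1; }
unsigned long hash_b(unsigned long v) { return mix(v) + 2; }
unsigned long hash_c(unsigned long v) { return mix(v) + 3; }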