mm, slub: move reset of c->page and freelist out of deactivate_slab()

deactivate_slab() removes the cpu slab by merging the cpu freelist with the slab's
freelist and putting the slab on the proper node's list. It also sets the
respective kmem_cache_cpu pointers to NULL.

By extracting the kmem_cache_cpu operations from the function, we can make it
independent of disabled irqs.
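
To make the new split of responsibilities concrete, here is a minimal userspace C
sketch of the pattern (the toy_* types and functions are invented stand-ins, not the
kernel implementation; the real code additionally uses cmpxchg loops, node list locks
and disabled irqs, none of which is modelled here). The shape mirrors the flush_slab()
hunk in the diff below.

#include <stddef.h>

struct toy_object { struct toy_object *next; };

/* Stand-ins for the slab fields of struct page, kmem_cache_node and kmem_cache_cpu. */
struct toy_page {
	struct toy_object *freelist;	/* objects already free in the page */
	struct toy_page *node_next;	/* linkage on the node's partial list */
};

struct toy_node { struct toy_page *partial; };

struct toy_cpu {
	struct toy_page *page;		/* the cpu slab */
	struct toy_object *freelist;	/* the cpu freelist */
};

/*
 * The "flush" half: merge an already-detached cpu freelist back into the page
 * and put the page on the node's list. It never touches the per-cpu state.
 */
static void toy_deactivate_slab(struct toy_node *n, struct toy_page *page,
				struct toy_object *freelist)
{
	while (freelist) {
		struct toy_object *next = freelist->next;

		freelist->next = page->freelist;
		page->freelist = freelist;
		freelist = next;
	}
	page->node_next = n->partial;
	n->partial = page;
}

/*
 * The caller-side half: snapshot and reset the per-cpu slot first (in the
 * kernel this part still runs with irqs disabled), then flush the detached
 * page without any further per-cpu accesses.
 */
static void toy_flush_cpu_slab(struct toy_node *n, struct toy_cpu *c)
{
	struct toy_object *freelist = c->freelist;
	struct toy_page *page = c->page;

	c->page = NULL;
	c->freelist = NULL;

	if (page)
		toy_deactivate_slab(n, page, freelist);
}

int main(void)
{
	struct toy_object objs[2] = { { &objs[1] }, { NULL } };
	struct toy_page page = { NULL, NULL };
	struct toy_node node = { NULL };
	struct toy_cpu cpu = { &page, &objs[0] };

	toy_flush_cpu_slab(&node, &cpu);

	/* The page holds both objects again and sits on the node's list. */
	return !(node.partial == &page && page.freelist == &objs[1] && !cpu.page);
}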

Also, if we return a single free pointer from ___slab_alloc(), we no longer have
to assign kmem_cache_cpu.page before deactivation, or care whether somebody preempted
us and assigned a different page to our kmem_cache_cpu in the process.
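
The same toy types can sketch the return_single idea (toy_alloc_single is again an
invented name, reusing the definitions from the sketch above): the freshly obtained
page is never written into the per-cpu slot, the first object goes to the caller and
the remainder straight to deactivation, so whatever a preemptor may have installed as
the cpu slab in the meantime is left alone.

/*
 * Take one object for the caller and immediately deactivate the rest of the
 * page; the per-cpu slot (toy_cpu) is never written.
 */
static void *toy_alloc_single(struct toy_node *n, struct toy_page *page,
			      struct toy_object *first)
{
	struct toy_object *rest = first->next;	/* get_freepointer() stand-in */

	first->next = NULL;
	toy_deactivate_slab(n, page, rest);
	return first;
}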

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Vlastimil Babka 2021-05-12 13:53:34 +02:00
parent 4b1f449ded
commit a019d20162

mm/slub.c
@@ -2209,10 +2209,13 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 }
 
 /*
- * Remove the cpu slab
+ * Finishes removing the cpu slab. Merges cpu's freelist with page's freelist,
+ * unfreezes the slabs and puts it on the proper list.
+ * Assumes the slab has been already safely taken away from kmem_cache_cpu
+ * by the caller.
  */
 static void deactivate_slab(struct kmem_cache *s, struct page *page,
-			    void *freelist, struct kmem_cache_cpu *c)
+			    void *freelist)
 {
 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -2341,9 +2344,6 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
 		discard_slab(s, page);
 		stat(s, FREE_SLAB);
 	}
-
-	c->page = NULL;
-	c->freelist = NULL;
 }
 
 /*
@@ -2468,10 +2468,16 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
-	stat(s, CPUSLAB_FLUSH);
-	deactivate_slab(s, c->page, c->freelist, c);
+	void *freelist = c->freelist;
+	struct page *page = c->page;
 
+	c->page = NULL;
+	c->freelist = NULL;
 	c->tid = next_tid(c->tid);
+
+	deactivate_slab(s, page, freelist);
+
+	stat(s, CPUSLAB_FLUSH);
 }
 
 /*
@@ -2769,7 +2775,10 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		local_irq_restore(flags);
 		goto reread_page;
 	}
-	deactivate_slab(s, page, c->freelist, c);
+	freelist = c->freelist;
+	c->page = NULL;
+	c->freelist = NULL;
+	deactivate_slab(s, page, freelist);
 	local_irq_restore(flags);
 
 new_slab:
@@ -2848,11 +2857,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 return_single:
 
 	local_irq_save(flags);
-	if (unlikely(c->page))
-		flush_slab(s, c);
-	c->page = page;
-
-	deactivate_slab(s, page, get_freepointer(s, freelist), c);
+	deactivate_slab(s, page, get_freepointer(s, freelist));
 	local_irq_restore(flags);
 	return freelist;
 }