mm, slub: extract get_partial() from new_slab_objects()

The later patches will need more fine-grained control over individual actions
in ___slab_alloc(), the only caller of new_slab_objects(), so this is a first
preparatory step with no functional change.

This adds a goto label that appears unnecessary at this point, but will be
useful for later changes.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Christoph Lameter <cl@linux.com>
This commit is contained in:
Vlastimil Babka 2021-05-11 12:45:48 +02:00
parent 976b805c78
commit 2a904905ae
1 changed file with 6 additions and 6 deletions

View File

@ -2613,17 +2613,12 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
int node, struct kmem_cache_cpu **pc)
{
void *freelist;
void *freelist = NULL;
struct kmem_cache_cpu *c = *pc;
struct page *page;
WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
freelist = get_partial(s, flags, node, c);
if (freelist)
return freelist;
page = new_slab(s, flags, node);
if (page) {
c = raw_cpu_ptr(s->cpu_slab);
@ -2787,6 +2782,10 @@ new_slab:
goto redo;
}
freelist = get_partial(s, gfpflags, node, c);
if (freelist)
goto check_new_page;
freelist = new_slab_objects(s, gfpflags, node, &c);
if (unlikely(!freelist)) {
@ -2794,6 +2793,7 @@ new_slab:
return NULL;
}
check_new_page:
page = c->page;
if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
goto load_freelist;