mm, slub: do initial checks in ___slab_alloc() with irqs enabled

As another step in shortening the irq-disabled sections in ___slab_alloc(), delay
disabling irqs until we have passed the initial checks: whether there is a cached
percpu slab and whether it is suitable for our allocation.

We now have to recheck c->page after actually disabling irqs, as an allocation in
an irq handler might have replaced it in the meantime.
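
To make the new flow concrete, here is a minimal sketch of the pattern (not part
of the patch): my_cpu_cache and my_try_cached_page() are made-up names standing in
for struct kmem_cache_cpu and the ___slab_alloc() fast-path checks, while
READ_ONCE(), unlikely() and local_irq_save()/local_irq_restore() are the real
kernel primitives involved.

#include <linux/compiler.h>     /* READ_ONCE(), unlikely() */
#include <linux/irqflags.h>     /* local_irq_save(), local_irq_restore() */
#include <linux/mm_types.h>     /* struct page */

/* Hypothetical stand-in for struct kmem_cache_cpu. */
struct my_cpu_cache {
        struct page *page;
};

static struct page *my_try_cached_page(struct my_cpu_cache *c)
{
        struct page *page;
        unsigned long flags;

reread_page:
        /* Cheap checks run with irqs still enabled. */
        page = READ_ONCE(c->page);
        if (!page)
                return NULL;            /* caller takes the slow path */

        /*
         * Only now disable irqs. An allocation from an irq handler may
         * have replaced c->page in the meantime, so recheck it.
         */
        local_irq_save(flags);
        if (unlikely(page != c->page)) {
                local_irq_restore(flags);
                goto reread_page;       /* retry with irqs enabled again */
        }

        /* ... use the verified page with irqs disabled ... */
        local_irq_restore(flags);
        return page;
}

The point is that the cheap rejection paths no longer pay for an irq
disable/enable pair; only the path that commits to the cached page does.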

Because one of those checks calls pfmemalloc_match(), we might hit the
VM_BUG_ON_PAGE(!PageSlab(page)) assertion in PageSlabPfmemalloc() if we get
interrupted and the page is freed under us. Thus introduce a
pfmemalloc_match_unsafe() variant that lacks the PageSlab check.
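
For reference (not part of the diff below, and reproduced approximately from the
kernel around this time), the helpers that trigger the problem look like this;
PageSlabPfmemalloc() asserts PageSlab before reading the flag:

static inline int PageSlabPfmemalloc(struct page *page)
{
        /* Fires if the page was freed and is no longer a slab page. */
        VM_BUG_ON_PAGE(!PageSlab(page), page);
        return PageActive(page);
}

static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
{
        /* Objects from a pfmemalloc slab only go to allocations allowed to use reserves. */
        if (unlikely(PageSlabPfmemalloc(page)))
                return gfp_pfmemalloc_allowed(gfpflags);

        return true;
}

The __PageSlabPfmemalloc()/pfmemalloc_match_unsafe() pair added below performs the
same test minus the assertion, so it can run before irqs are disabled while the
page may still be freed under us; the result is then validated by rereading
c->page with irqs off.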

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Author: Vlastimil Babka <vbabka@suse.cz>
Date:   2021-05-08 02:28:02 +02:00
Commit: 0b303fb402 (parent: e500059ba5)
2 changed files, 54 insertions(+), 9 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h

@@ -815,6 +815,15 @@ static inline int PageSlabPfmemalloc(struct page *page)
         return PageActive(page);
 }
 
+/*
+ * A version of PageSlabPfmemalloc() for opportunistic checks where the page
+ * might have been freed under us and not be a PageSlab anymore.
+ */
+static inline int __PageSlabPfmemalloc(struct page *page)
+{
+        return PageActive(page);
+}
+
 static inline void SetPageSlabPfmemalloc(struct page *page)
 {
         VM_BUG_ON_PAGE(!PageSlab(page), page);

diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c

@@ -2620,6 +2620,19 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
         return true;
 }
 
+/*
+ * A variant of pfmemalloc_match() that tests page flags without asserting
+ * PageSlab. Intended for opportunistic checks before taking a lock and
+ * rechecking that nobody else freed the page under us.
+ */
+static inline bool pfmemalloc_match_unsafe(struct page *page, gfp_t gfpflags)
+{
+        if (unlikely(__PageSlabPfmemalloc(page)))
+                return gfp_pfmemalloc_allowed(gfpflags);
+
+        return true;
+}
+
 /*
  * Check the page->freelist of a page and either transfer the freelist to the
  * per cpu freelist or deactivate the page.
@@ -2682,8 +2695,9 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 
         stat(s, ALLOC_SLOWPATH);
 
-        local_irq_save(flags);
-        page = c->page;
+reread_page:
+
+        page = READ_ONCE(c->page);
         if (!page) {
                 /*
                  * if the node is not online or has no normal memory, just
@@ -2692,6 +2706,11 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                 if (unlikely(node != NUMA_NO_NODE &&
                              !node_isset(node, slab_nodes)))
                         node = NUMA_NO_NODE;
+                local_irq_save(flags);
+                if (unlikely(c->page)) {
+                        local_irq_restore(flags);
+                        goto reread_page;
+                }
                 goto new_slab;
         }
 redo:
@@ -2706,8 +2725,7 @@ redo:
                         goto redo;
                 } else {
                         stat(s, ALLOC_NODE_MISMATCH);
-                        deactivate_slab(s, page, c->freelist, c);
-                        goto new_slab;
+                        goto deactivate_slab;
                 }
         }
 
@@ -2716,12 +2734,15 @@ redo:
          * PFMEMALLOC but right now, we are losing the pfmemalloc
          * information when the page leaves the per-cpu allocator
          */
-        if (unlikely(!pfmemalloc_match(page, gfpflags))) {
-                deactivate_slab(s, page, c->freelist, c);
-                goto new_slab;
-        }
+        if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
+                goto deactivate_slab;
 
-        /* must check again c->freelist in case of cpu migration or IRQ */
+        /* must check again c->page in case IRQ handler changed it */
+        local_irq_save(flags);
+        if (unlikely(page != c->page)) {
+                local_irq_restore(flags);
+                goto reread_page;
+        }
         freelist = c->freelist;
         if (freelist)
                 goto load_freelist;
@@ -2737,6 +2758,9 @@ redo:
         stat(s, ALLOC_REFILL);
 
 load_freelist:
+
+        lockdep_assert_irqs_disabled();
+
         /*
          * freelist is pointing to the list of objects to be used.
          * page is pointing to the page from which the objects are obtained.
@@ -2748,11 +2772,23 @@ load_freelist:
         local_irq_restore(flags);
         return freelist;
 
+deactivate_slab:
+
+        local_irq_save(flags);
+        if (page != c->page) {
+                local_irq_restore(flags);
+                goto reread_page;
+        }
+        deactivate_slab(s, page, c->freelist, c);
+
 new_slab:
 
+        lockdep_assert_irqs_disabled();
+
         if (slub_percpu_partial(c)) {
                 page = c->page = slub_percpu_partial(c);
                 slub_set_percpu_partial(c, page);
+                local_irq_restore(flags);
                 stat(s, CPU_PARTIAL_ALLOC);
                 goto redo;
         }