hugetlb: split hugetlb_hstate_alloc_pages

1G and 2M huge pages have different allocation and initialization logic,
which leads to subtle differences in how they can be parallelized.
Therefore, it is appropriate to split hugetlb_hstate_alloc_pages into a
gigantic and a non-gigantic path.

This patch has no functional changes.
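
A condensed sketch of the resulting structure (taken from the diff below,
with the hugetlb_cma and node-specific early returns elided):

	static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h);
	static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h);

	static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
	{
		unsigned long allocated;

		/* ... gigantic-with-CMA and node-specific cases return early ... */

		if (hstate_is_gigantic(h))
			allocated = hugetlb_gigantic_pages_alloc_boot(h);
		else
			allocated = hugetlb_pages_alloc_boot(h);

		hugetlb_hstate_alloc_pages_errcheck(allocated, h);
	}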

Link: https://lkml.kernel.org/r/20240222140422.393911-3-gang.li@linux.dev
Signed-off-by: Gang Li <ligang.bdlg@bytedance.com>
Tested-by: David Rientjes <rientjes@google.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Cc: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3509,6 +3509,43 @@ static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated,
 	}
 }
 
+static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
+{
+	unsigned long i;
+
+	for (i = 0; i < h->max_huge_pages; ++i) {
+		if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
+			break;
+		cond_resched();
+	}
+
+	return i;
+}
+
+static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
+{
+	unsigned long i;
+	struct folio *folio;
+	LIST_HEAD(folio_list);
+	nodemask_t node_alloc_noretry;
+
+	/* Bit mask controlling how hard we retry per-node allocations.*/
+	nodes_clear(node_alloc_noretry);
+
+	for (i = 0; i < h->max_huge_pages; ++i) {
+		folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
+						&node_alloc_noretry);
+		if (!folio)
+			break;
+		list_add(&folio->lru, &folio_list);
+		cond_resched();
+	}
+
+	prep_and_add_allocated_folios(h, &folio_list);
+
+	return i;
+}
+
 /*
  * NOTE: this routine is called in different contexts for gigantic and
  * non-gigantic pages.
@@ -3522,10 +3559,7 @@ static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated,
  */
 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 {
-	unsigned long i;
-	struct folio *folio;
-	LIST_HEAD(folio_list);
-	nodemask_t *node_alloc_noretry;
+	unsigned long allocated;
 
 	/* skip gigantic hugepages allocation if hugetlb_cma enabled */
 	if (hstate_is_gigantic(h) && hugetlb_cma_size) {
@@ -3538,47 +3572,12 @@ static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
 		return;
 
 	/* below will do all node balanced alloc */
-	if (!hstate_is_gigantic(h)) {
-		/*
-		 * Bit mask controlling how hard we retry per-node allocations.
-		 * Ignore errors as lower level routines can deal with
-		 * node_alloc_noretry == NULL.  If this kmalloc fails at boot
-		 * time, we are likely in bigger trouble.
-		 */
-		node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
-						GFP_KERNEL);
-	} else {
-		/* allocations done at boot time */
-		node_alloc_noretry = NULL;
-	}
+	if (hstate_is_gigantic(h))
+		allocated = hugetlb_gigantic_pages_alloc_boot(h);
+	else
+		allocated = hugetlb_pages_alloc_boot(h);
 
-	/* bit mask controlling how hard we retry per-node allocations */
-	if (node_alloc_noretry)
-		nodes_clear(*node_alloc_noretry);
-
-	for (i = 0; i < h->max_huge_pages; ++i) {
-		if (hstate_is_gigantic(h)) {
-			/*
-			 * gigantic pages not added to list as they are not
-			 * added to pools now.
-			 */
-			if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
-				break;
-		} else {
-			folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
-							node_alloc_noretry);
-			if (!folio)
-				break;
-			list_add(&folio->lru, &folio_list);
-		}
-		cond_resched();
-	}
-
-	/* list will be empty if hstate_is_gigantic */
-	prep_and_add_allocated_folios(h, &folio_list);
-
-	hugetlb_hstate_alloc_pages_errcheck(i, h);
-	kfree(node_alloc_noretry);
+	hugetlb_hstate_alloc_pages_errcheck(allocated, h);
 }
 
 static void __init hugetlb_init_hstates(void)