mm/mempolicy: Use vma_alloc_folio() in new_page()

Simplify new_page() by unifying the THP and base page cases, and
handle orders other than 0 and HPAGE_PMD_ORDER correctly.
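
For reference, here is new_page() as it reads once this patch is applied. The
body of the find_vma() loop is elided between the two hunks below; it is
reproduced here from the surrounding mm/mempolicy.c context and is unchanged
by this patch:

static struct page *new_page(struct page *page, unsigned long start)
{
	struct folio *dst, *src = page_folio(page);
	struct vm_area_struct *vma;
	unsigned long address;
	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;

	vma = find_vma(current->mm, start);
	while (vma) {
		address = page_address_in_vma(page, vma);
		if (address != -EFAULT)
			break;
		vma = vma->vm_next;
	}

	/* hugetlb keeps its own allocator and per-hstate policy */
	if (folio_test_hugetlb(src))
		return alloc_huge_page_vma(page_hstate(&src->page),
				vma, address);

	/* one path for all other orders, not just 0 and HPAGE_PMD_ORDER */
	if (folio_test_large(src))
		gfp = GFP_TRANSHUGE;

	/*
	 * if !vma, vma_alloc_folio() will use task or system default policy
	 */
	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
			folio_test_large(src));
	return &dst->page;
}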

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Author: Matthew Wilcox (Oracle)
Date:   2022-04-04 15:23:39 -04:00
Parent: f584b68005
Commit: ec4858e07e
1 changed file with 11 additions and 14 deletions

--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1191,8 +1191,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
  */
 static struct page *new_page(struct page *page, unsigned long start)
 {
+	struct folio *dst, *src = page_folio(page);
 	struct vm_area_struct *vma;
 	unsigned long address;
+	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
 
 	vma = find_vma(current->mm, start);
 	while (vma) {
@@ -1202,24 +1204,19 @@ static struct page *new_page(struct page *page, unsigned long start)
 		vma = vma->vm_next;
 	}
 
-	if (PageHuge(page)) {
-		return alloc_huge_page_vma(page_hstate(compound_head(page)),
+	if (folio_test_hugetlb(src))
+		return alloc_huge_page_vma(page_hstate(&src->page),
 				vma, address);
-	} else if (PageTransHuge(page)) {
-		struct page *thp;
 
-		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
-					 HPAGE_PMD_ORDER);
-		if (!thp)
-			return NULL;
-		prep_transhuge_page(thp);
-		return thp;
-	}
+	if (folio_test_large(src))
+		gfp = GFP_TRANSHUGE;
+
 	/*
-	 * if !vma, alloc_page_vma() will use task or system default policy
+	 * if !vma, vma_alloc_folio() will use task or system default policy
 	 */
-	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
-			vma, address);
+	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
+			folio_test_large(src));
+	return &dst->page;
 }
 
 #else
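
Why the explicit prep_transhuge_page() call can simply go: vma_alloc_folio()
performs that step itself for large folios. A sketch of the helper as
introduced by the parent commit listed above (f584b68005), reconstructed from
memory rather than quoted from this patch, so treat the details as
illustrative:

struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr, bool hugepage)
{
	struct folio *folio;

	folio = alloc_pages_vma(gfp, order, vma, addr, numa_node_id(),
			hugepage);
	/* the compound setup new_page() previously did by hand */
	if (folio && order > 1)
		prep_transhuge_page(&folio->page);

	return folio;
}

This is also why the call site passes folio_test_large(src) as the final
(hugepage) argument: large folios take the THP allocation policy, while
order-0 pages keep the plain per-VMA policy.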