mm/migrate: Use a folio in alloc_migration_target()

This removes the assumption that a large folio is necessarily HPAGE_PMD_ORDER,
and lets us drop the call to prep_transhuge_page() as well as a few hidden
calls to compound_head().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Author: Matthew Wilcox (Oracle)
Date:   2022-04-04 14:35:04 -04:00
Commit: ffe06786b5
Parent: 83a8441f8d
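
The key generalization is the order calculation: the replacement memory is now
allocated at the source folio's own order via folio_order(), rather than
assuming every large folio is PMD-sized. A minimal sketch of that pattern
follows; alloc_same_order_target() is a made-up name used only for
illustration, while folio_order(), folio_nid() and __folio_alloc() are the
real APIs that appear in the diff below.

/*
 * Illustration only: allocate a migration target at the same order as
 * the source folio instead of hard-coding HPAGE_PMD_ORDER.
 * alloc_same_order_target() is a hypothetical helper, not kernel code.
 */
static struct folio *alloc_same_order_target(struct folio *src, gfp_t gfp,
					     nodemask_t *nmask)
{
	unsigned int order = folio_order(src);	/* 0 for a base page */

	if (order)	/* large folio: use THP-style allocation flags */
		gfp = (gfp & ~__GFP_RECLAIM) | GFP_TRANSHUGE;

	return __folio_alloc(gfp, order, folio_nid(src), nmask);
}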

@@ -1520,10 +1520,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
+	struct folio *folio = page_folio(page);
 	struct migration_target_control *mtc;
 	gfp_t gfp_mask;
 	unsigned int order = 0;
-	struct page *new_page = NULL;
+	struct folio *new_folio = NULL;
 	int nid;
 	int zidx;
@@ -1531,34 +1532,31 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	gfp_mask = mtc->gfp_mask;
 	nid = mtc->nid;
 	if (nid == NUMA_NO_NODE)
-		nid = page_to_nid(page);
+		nid = folio_nid(folio);
 
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(compound_head(page));
+	if (folio_test_hugetlb(folio)) {
+		struct hstate *h = page_hstate(&folio->page);
 
 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
 		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
 	}
 
-	if (PageTransHuge(page)) {
+	if (folio_test_large(folio)) {
 		/*
 		 * clear __GFP_RECLAIM to make the migration callback
 		 * consistent with regular THP allocations.
 		 */
 		gfp_mask &= ~__GFP_RECLAIM;
 		gfp_mask |= GFP_TRANSHUGE;
-		order = HPAGE_PMD_ORDER;
+		order = folio_order(folio);
 	}
-	zidx = zone_idx(page_zone(page));
+	zidx = zone_idx(folio_zone(folio));
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
+	new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
 
-	if (new_page && PageTransHuge(new_page))
-		prep_transhuge_page(new_page);
-
-	return new_page;
+	return &new_folio->page;
 }
 
 #ifdef CONFIG_NUMA
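
For context, alloc_migration_target() is the allocation callback that callers
hand to migrate_pages(), with the node, nodemask and GFP flags carried in a
struct migration_target_control passed through the 'private' argument. Below
is a rough sketch of such a caller, loosely modelled on the memory-hotplug
offline path; migrate_away() is a made-up name, and the exact migrate_pages()
signature differs across kernel versions.

/* Hypothetical caller, for illustration only. */
static int migrate_away(struct list_head *pagelist, int target_nid)
{
	struct migration_target_control mtc = {
		.nid = target_nid,
		.nmask = NULL,	/* no nodemask restriction */
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	/* alloc_migration_target() picks the new folio for each page. */
	return migrate_pages(pagelist, alloc_migration_target, NULL,
			     (unsigned long)&mtc, MIGRATE_SYNC,
			     MR_MEMORY_HOTPLUG, NULL);
}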