mm: convert migrate_pages() to work on folios

Almost all of the callers & implementors of migrate_pages() were already
converted to use folios.  compaction_alloc() & compaction_free() are
trivial to convert as part of this patch and not worth splitting out.
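
The interface change, in short: the allocation and free callbacks now take
and return a struct folio rather than a struct page (condensed from the
include/linux/migrate.h hunk below):

    /* before */
    typedef struct page *new_page_t(struct page *page, unsigned long private);
    typedef void free_page_t(struct page *page, unsigned long private);

    /* after */
    typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
    typedef void free_folio_t(struct folio *folio, unsigned long private);

Callback implementations therefore no longer need page_folio()/&folio->page
conversions at the migrate_pages() boundary.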

Link: https://lkml.kernel.org/r/20230513001101.276972-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 4e096ae180 (parent b2cac24819)
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: 2023-05-13 01:11:01 +01:00, committed by Andrew Morton
7 changed files with 108 additions and 123 deletions

Documentation/mm/page_migration.rst

@@ -73,14 +73,13 @@ In kernel use of migrate_pages()
It also prevents the swapper or other scans from encountering
the page.
2. We need to have a function of type new_page_t that can be
2. We need to have a function of type new_folio_t that can be
passed to migrate_pages(). This function should figure out
how to allocate the correct new page given the old page.
how to allocate the correct new folio given the old folio.
3. The migrate_pages() function is called which attempts
to do the migration. It will call the function to allocate
the new page for each page that is considered for
moving.
the new folio for each folio that is considered for moving.
How migrate_pages() works
=========================
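
As a minimal sketch of the three steps above (the callback name and GFP
flags here are hypothetical, not taken from this patch):

    static struct folio *my_alloc_dst(struct folio *src, unsigned long private)
    {
            /* allocate a destination folio of the same order as the source */
            return folio_alloc(GFP_HIGHUSER_MOVABLE, folio_order(src));
    }

    /* step 1: folios have already been isolated onto 'pagelist' */
    /* steps 2 & 3: hand the allocator to migrate_pages() */
    nr_failed = migrate_pages(&pagelist, my_alloc_dst, NULL, 0,
                              MIGRATE_SYNC, MR_SYSCALL, NULL);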

Documentation/translations/zh_CN/mm/page_migration.rst

@@ -55,7 +55,7 @@ mbind()设置一个新的内存策略。一个进程的页面也可以通过sys_
消失。它还可以防止交换器或其他扫描器遇到该页。
2. 我们需要有一个new_page_t类型的函数可以传递给migrate_pages()。这个函数应该计算
2. 我们需要有一个new_folio_t类型的函数可以传递给migrate_pages()。这个函数应该计算
出如何在给定的旧页面中分配正确的新页面。
3. migrate_pages()函数被调用,它试图进行迁移。它将调用该函数为每个被考虑迁移的页面分

include/linux/migrate.h

@@ -7,8 +7,8 @@
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>
typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);
typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
typedef void free_folio_t(struct folio *folio, unsigned long private);
struct migration_target_control;
@@ -67,10 +67,10 @@ int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode, int extra_count);
int migrate_folio(struct address_space *mapping, struct folio *dst,
struct folio *src, enum migrate_mode mode);
int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
struct page *alloc_migration_target(struct page *page, unsigned long private);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
bool isolate_movable_page(struct page *page, isolate_mode_t mode);
int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -85,11 +85,11 @@ int folio_migrate_mapping(struct address_space *mapping,
#else
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
free_page_t free, unsigned long private, enum migrate_mode mode,
int reason, unsigned int *ret_succeeded)
static inline int migrate_pages(struct list_head *l, new_folio_t new,
free_folio_t free, unsigned long private,
enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
static inline struct folio *alloc_migration_target(struct folio *src,
unsigned long private)
{ return NULL; }
static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
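
Note that migrate_pages() returns the number of folios that could not be
migrated (or a negative errno), and the caller stays responsible for putting
those back; the pattern used by the mm/mempolicy.c hunk further down is
roughly (my_alloc_dst is the hypothetical allocator sketched earlier):

    nr_failed = migrate_pages(&pagelist, my_alloc_dst, NULL, 0,
                              MIGRATE_SYNC, MR_SYSCALL, &nr_succeeded);
    if (nr_failed)
            putback_movable_pages(&pagelist);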

mm/compaction.c

@@ -1685,11 +1685,10 @@ splitmap:
* This is a migrate-callback that "allocates" freepages by taking pages
* from the isolated freelists in the block we are migrating to.
*/
static struct page *compaction_alloc(struct page *migratepage,
unsigned long data)
static struct folio *compaction_alloc(struct folio *src, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
struct page *freepage;
struct folio *dst;
if (list_empty(&cc->freepages)) {
isolate_freepages(cc);
@@ -1698,11 +1697,11 @@ static struct page *compaction_alloc(struct page *migratepage,
return NULL;
}
freepage = list_entry(cc->freepages.next, struct page, lru);
list_del(&freepage->lru);
dst = list_entry(cc->freepages.next, struct folio, lru);
list_del(&dst->lru);
cc->nr_freepages--;
return freepage;
return dst;
}
/*
@@ -1710,11 +1709,11 @@ static struct page *compaction_alloc(struct page *migratepage,
* freelist. All pages on the freelist are from the same zone, so there is no
* special handling needed for NUMA.
*/
static void compaction_free(struct page *page, unsigned long data)
static void compaction_free(struct folio *dst, unsigned long data)
{
struct compact_control *cc = (struct compact_control *)data;
list_add(&page->lru, &cc->freepages);
list_add(&dst->lru, &cc->freepages);
cc->nr_freepages++;
}
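
For context, this callback pair is wired into migrate_pages() from
compact_zone(); that call site is untouched by this patch and looks
roughly like:

    err = migrate_pages(&cc->migratepages, compaction_alloc,
                        compaction_free, (unsigned long)cc, cc->mode,
                        MR_COMPACTION, &nr_succeeded);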

mm/mempolicy.c

@@ -1195,24 +1195,22 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
* list of pages handed to migrate_pages()--which is how we get here--
* is in virtual address order.
*/
static struct page *new_page(struct page *page, unsigned long start)
static struct folio *new_folio(struct folio *src, unsigned long start)
{
struct folio *dst, *src = page_folio(page);
struct vm_area_struct *vma;
unsigned long address;
VMA_ITERATOR(vmi, current->mm, start);
gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
for_each_vma(vmi, vma) {
address = page_address_in_vma(page, vma);
address = page_address_in_vma(&src->page, vma);
if (address != -EFAULT)
break;
}
if (folio_test_hugetlb(src)) {
dst = alloc_hugetlb_folio_vma(folio_hstate(src),
return alloc_hugetlb_folio_vma(folio_hstate(src),
vma, address);
return &dst->page;
}
if (folio_test_large(src))
@@ -1221,9 +1219,8 @@ static struct page *new_page(struct page *page, unsigned long start)
/*
* if !vma, vma_alloc_folio() will use task or system default policy
*/
dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
return vma_alloc_folio(gfp, folio_order(src), vma, address,
folio_test_large(src));
return &dst->page;
}
#else
@@ -1239,7 +1236,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
return -ENOSYS;
}
static struct page *new_page(struct page *page, unsigned long start)
static struct folio *new_folio(struct folio *src, unsigned long start)
{
return NULL;
}
@@ -1334,7 +1331,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (!list_empty(&pagelist)) {
WARN_ON_ONCE(flags & MPOL_MF_LAZY);
nr_failed = migrate_pages(&pagelist, new_page, NULL,
nr_failed = migrate_pages(&pagelist, new_folio, NULL,
start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
if (nr_failed)
putback_movable_pages(&pagelist);
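
The private argument is an opaque cookie that each caller repurposes for its
own context, as the call sites touched by this patch show:

    /* mempolicy: the mbind() start address, used to look up the VMA */
    migrate_pages(&pagelist, new_folio, NULL, start,
                  MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);

    /* NUMA balancing: the target node id */
    migrate_pages(&migratepages, alloc_misplaced_dst_folio, NULL, node,
                  MIGRATE_ASYNC, MR_NUMA_MISPLACED, &nr_succeeded);

    /* demotion: a pointer to a struct migration_target_control */
    migrate_pages(demote_folios, alloc_demote_folio, NULL, (unsigned long)&mtc,
                  MIGRATE_ASYNC, MR_DEMOTION, &nr_succeeded);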

mm/migrate.c

@@ -1067,15 +1067,13 @@ static void migrate_folio_undo_src(struct folio *src,
}
/* Restore the destination folio to the original state upon failure */
static void migrate_folio_undo_dst(struct folio *dst,
bool locked,
free_page_t put_new_page,
unsigned long private)
static void migrate_folio_undo_dst(struct folio *dst, bool locked,
free_folio_t put_new_folio, unsigned long private)
{
if (locked)
folio_unlock(dst);
if (put_new_page)
put_new_page(&dst->page, private);
if (put_new_folio)
put_new_folio(dst, private);
else
folio_put(dst);
}
@@ -1099,14 +1097,13 @@ static void migrate_folio_done(struct folio *src,
}
/* Obtain the lock on page, remove all ptes. */
static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
unsigned long private, struct folio *src,
struct folio **dstp, enum migrate_mode mode,
enum migrate_reason reason, struct list_head *ret)
static int migrate_folio_unmap(new_folio_t get_new_folio,
free_folio_t put_new_folio, unsigned long private,
struct folio *src, struct folio **dstp, enum migrate_mode mode,
enum migrate_reason reason, struct list_head *ret)
{
struct folio *dst;
int rc = -EAGAIN;
struct page *newpage = NULL;
int page_was_mapped = 0;
struct anon_vma *anon_vma = NULL;
bool is_lru = !__PageMovable(&src->page);
@@ -1123,10 +1120,9 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
return MIGRATEPAGE_SUCCESS;
}
newpage = get_new_page(&src->page, private);
if (!newpage)
dst = get_new_folio(src, private);
if (!dst)
return -ENOMEM;
dst = page_folio(newpage);
*dstp = dst;
dst->private = NULL;
@@ -1254,13 +1250,13 @@ out:
ret = NULL;
migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
migrate_folio_undo_dst(dst, dst_locked, put_new_page, private);
migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
return rc;
}
/* Migrate the folio to the newly allocated folio in dst. */
static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
struct folio *src, struct folio *dst,
enum migrate_mode mode, enum migrate_reason reason,
struct list_head *ret)
@@ -1332,7 +1328,7 @@ out:
}
migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
migrate_folio_undo_dst(dst, true, put_new_page, private);
migrate_folio_undo_dst(dst, true, put_new_folio, private);
return rc;
}
@@ -1355,16 +1351,14 @@ out:
* because then pte is replaced with migration swap entry and direct I/O code
* will wait in the page fault for migration to complete.
*/
static int unmap_and_move_huge_page(new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
struct page *hpage, int force,
enum migrate_mode mode, int reason,
struct list_head *ret)
static int unmap_and_move_huge_page(new_folio_t get_new_folio,
free_folio_t put_new_folio, unsigned long private,
struct folio *src, int force, enum migrate_mode mode,
int reason, struct list_head *ret)
{
struct folio *dst, *src = page_folio(hpage);
struct folio *dst;
int rc = -EAGAIN;
int page_was_mapped = 0;
struct page *new_hpage;
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
@@ -1374,10 +1368,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
return MIGRATEPAGE_SUCCESS;
}
new_hpage = get_new_page(hpage, private);
if (!new_hpage)
dst = get_new_folio(src, private);
if (!dst)
return -ENOMEM;
dst = page_folio(new_hpage);
if (!folio_trylock(src)) {
if (!force)
@@ -1418,7 +1411,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
* semaphore in write mode here and set TTU_RMAP_LOCKED
* to let lower levels know we have taken the lock.
*/
mapping = hugetlb_page_mapping_lock_write(hpage);
mapping = hugetlb_page_mapping_lock_write(&src->page);
if (unlikely(!mapping))
goto unlock_put_anon;
@@ -1448,7 +1441,7 @@ put_anon:
if (rc == MIGRATEPAGE_SUCCESS) {
move_hugetlb_state(src, dst, reason);
put_new_page = NULL;
put_new_folio = NULL;
}
out_unlock:
@@ -1464,8 +1457,8 @@ out:
* it. Otherwise, put_page() will drop the reference grabbed during
* isolation.
*/
if (put_new_page)
put_new_page(new_hpage, private);
if (put_new_folio)
put_new_folio(dst, private);
else
folio_putback_active_hugetlb(dst);
@@ -1512,8 +1505,8 @@ struct migrate_pages_stats {
* exist any more. It is caller's responsibility to call putback_movable_pages()
* only if ret != 0.
*/
static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
free_folio_t put_new_folio, unsigned long private,
enum migrate_mode mode, int reason,
struct migrate_pages_stats *stats,
struct list_head *ret_folios)
@@ -1551,9 +1544,9 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
continue;
}
rc = unmap_and_move_huge_page(get_new_page,
put_new_page, private,
&folio->page, pass > 2, mode,
rc = unmap_and_move_huge_page(get_new_folio,
put_new_folio, private,
folio, pass > 2, mode,
reason, ret_folios);
/*
* The rules are:
@@ -1610,11 +1603,11 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
* deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the
* length of the from list must be <= 1.
*/
static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
enum migrate_mode mode, int reason, struct list_head *ret_folios,
struct list_head *split_folios, struct migrate_pages_stats *stats,
int nr_pass)
static int migrate_pages_batch(struct list_head *from,
new_folio_t get_new_folio, free_folio_t put_new_folio,
unsigned long private, enum migrate_mode mode, int reason,
struct list_head *ret_folios, struct list_head *split_folios,
struct migrate_pages_stats *stats, int nr_pass)
{
int retry = 1;
int thp_retry = 1;
@@ -1664,8 +1657,8 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
continue;
}
rc = migrate_folio_unmap(get_new_page, put_new_page, private,
folio, &dst, mode, reason, ret_folios);
rc = migrate_folio_unmap(get_new_folio, put_new_folio,
private, folio, &dst, mode, reason,
ret_folios);
/*
* The rules are:
* Success: folio will be freed
@@ -1762,7 +1756,7 @@ move:
cond_resched();
rc = migrate_folio_move(put_new_page, private,
rc = migrate_folio_move(put_new_folio, private,
folio, dst, mode,
reason, ret_folios);
/*
@@ -1808,7 +1802,7 @@ out:
migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
true, ret_folios);
list_del(&dst->lru);
migrate_folio_undo_dst(dst, true, put_new_page, private);
migrate_folio_undo_dst(dst, true, put_new_folio, private);
dst = dst2;
dst2 = list_next_entry(dst, lru);
}
@@ -1816,10 +1810,11 @@ out:
return rc;
}
static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
enum migrate_mode mode, int reason, struct list_head *ret_folios,
struct list_head *split_folios, struct migrate_pages_stats *stats)
static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
free_folio_t put_new_folio, unsigned long private,
enum migrate_mode mode, int reason,
struct list_head *ret_folios, struct list_head *split_folios,
struct migrate_pages_stats *stats)
{
int rc, nr_failed = 0;
LIST_HEAD(folios);
@@ -1827,7 +1822,7 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
memset(&astats, 0, sizeof(astats));
/* Try to migrate in batch with MIGRATE_ASYNC mode firstly */
rc = migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC,
rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
reason, &folios, split_folios, &astats,
NR_MAX_MIGRATE_ASYNC_RETRY);
stats->nr_succeeded += astats.nr_succeeded;
@@ -1849,7 +1844,7 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
list_splice_tail_init(&folios, from);
while (!list_empty(from)) {
list_move(from->next, &folios);
rc = migrate_pages_batch(&folios, get_new_page, put_new_page,
rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
private, mode, reason, ret_folios,
split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
list_splice_tail_init(&folios, ret_folios);
@@ -1866,11 +1861,11 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
* supplied as the target for the page migration
*
* @from: The list of folios to be migrated.
* @get_new_page: The function used to allocate free folios to be used
* @get_new_folio: The function used to allocate free folios to be used
* as the target of the folio migration.
* @put_new_page: The function used to free target folios if migration
* @put_new_folio: The function used to free target folios if migration
* fails, or NULL if no special handling is necessary.
* @private: Private data to be passed on to get_new_page()
* @private: Private data to be passed on to get_new_folio()
* @mode: The migration mode that specifies the constraints for
* folio migration, if any.
* @reason: The reason for folio migration.
@@ -1887,8 +1882,8 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page,
* considered as the number of non-migrated large folio, no matter how many
* split folios of the large folio are migrated successfully.
*/
int migrate_pages(struct list_head *from, new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
free_folio_t put_new_folio, unsigned long private,
enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
{
int rc, rc_gather;
@@ -1903,7 +1898,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
memset(&stats, 0, sizeof(stats));
rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private,
rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
mode, reason, &stats, &ret_folios);
if (rc_gather < 0)
goto out;
@@ -1926,12 +1921,14 @@ again:
else
list_splice_init(from, &folios);
if (mode == MIGRATE_ASYNC)
rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
mode, reason, &ret_folios, &split_folios, &stats,
NR_MAX_MIGRATE_PAGES_RETRY);
rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
private, mode, reason, &ret_folios,
&split_folios, &stats,
NR_MAX_MIGRATE_PAGES_RETRY);
else
rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private,
mode, reason, &ret_folios, &split_folios, &stats);
rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
private, mode, reason, &ret_folios,
&split_folios, &stats);
list_splice_tail_init(&folios, &ret_folios);
if (rc < 0) {
rc_gather = rc;
@@ -1944,8 +1941,9 @@ again:
* is counted as 1 failure already. And, we only try to migrate
* with minimal effort, force MIGRATE_ASYNC mode and retry once.
*/
migrate_pages_batch(&split_folios, get_new_page, put_new_page, private,
MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1);
migrate_pages_batch(&split_folios, get_new_folio,
put_new_folio, private, MIGRATE_ASYNC, reason,
&ret_folios, NULL, &stats, 1);
list_splice_tail_init(&split_folios, &ret_folios);
}
rc_gather += rc;
@@ -1980,14 +1978,11 @@ out:
return rc_gather;
}
struct page *alloc_migration_target(struct page *page, unsigned long private)
struct folio *alloc_migration_target(struct folio *src, unsigned long private)
{
struct folio *folio = page_folio(page);
struct migration_target_control *mtc;
gfp_t gfp_mask;
unsigned int order = 0;
struct folio *hugetlb_folio = NULL;
struct folio *new_folio = NULL;
int nid;
int zidx;
@@ -1995,33 +1990,30 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
gfp_mask = mtc->gfp_mask;
nid = mtc->nid;
if (nid == NUMA_NO_NODE)
nid = folio_nid(folio);
nid = folio_nid(src);
if (folio_test_hugetlb(folio)) {
struct hstate *h = folio_hstate(folio);
if (folio_test_hugetlb(src)) {
struct hstate *h = folio_hstate(src);
gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid,
return alloc_hugetlb_folio_nodemask(h, nid,
mtc->nmask, gfp_mask);
return &hugetlb_folio->page;
}
if (folio_test_large(folio)) {
if (folio_test_large(src)) {
/*
* clear __GFP_RECLAIM to make the migration callback
* consistent with regular THP allocations.
*/
gfp_mask &= ~__GFP_RECLAIM;
gfp_mask |= GFP_TRANSHUGE;
order = folio_order(folio);
order = folio_order(src);
}
zidx = zone_idx(folio_zone(folio));
zidx = zone_idx(folio_zone(src));
if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
gfp_mask |= __GFP_HIGHMEM;
new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
return &new_folio->page;
return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
}
#ifdef CONFIG_NUMA
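
alloc_migration_target() is itself used as a new_folio_t callback by several
callers, with a struct migration_target_control smuggled through private. A
sketch of that shape (the field values here are illustrative, not from this
patch):

    struct migration_target_control mtc = {
            .nid = NUMA_NO_NODE,
            .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
    };

    migrate_pages(&source_list, alloc_migration_target, NULL,
                  (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);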
@@ -2472,13 +2464,12 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
return false;
}
static struct page *alloc_misplaced_dst_page(struct page *page,
static struct folio *alloc_misplaced_dst_folio(struct folio *src,
unsigned long data)
{
int nid = (int) data;
int order = compound_order(page);
int order = folio_order(src);
gfp_t gfp = __GFP_THISNODE;
struct folio *new;
if (order > 0)
gfp |= GFP_TRANSHUGE_LIGHT;
@@ -2487,9 +2478,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
__GFP_NOWARN;
gfp &= ~__GFP_RECLAIM;
}
new = __folio_alloc_node(gfp, order, nid);
return &new->page;
return __folio_alloc_node(gfp, order, nid);
}
static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
@@ -2567,7 +2556,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
goto out;
list_add(&page->lru, &migratepages);
nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
NULL, node, MIGRATE_ASYNC,
MR_NUMA_MISPLACED, &nr_succeeded);
if (nr_remaining) {

mm/vmscan.c

@@ -1621,9 +1621,10 @@ static void folio_check_dirty_writeback(struct folio *folio,
mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
}
static struct page *alloc_demote_page(struct page *page, unsigned long private)
static struct folio *alloc_demote_folio(struct folio *src,
unsigned long private)
{
struct page *target_page;
struct folio *dst;
nodemask_t *allowed_mask;
struct migration_target_control *mtc;
@@ -1641,14 +1642,14 @@ static struct page *alloc_demote_page(struct page *page, unsigned long private)
*/
mtc->nmask = NULL;
mtc->gfp_mask |= __GFP_THISNODE;
target_page = alloc_migration_target(page, (unsigned long)mtc);
if (target_page)
return target_page;
dst = alloc_migration_target(src, (unsigned long)mtc);
if (dst)
return dst;
mtc->gfp_mask &= ~__GFP_THISNODE;
mtc->nmask = allowed_mask;
return alloc_migration_target(page, (unsigned long)mtc);
return alloc_migration_target(src, (unsigned long)mtc);
}
/*
@@ -1683,7 +1684,7 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
node_get_allowed_targets(pgdat, &allowed_mask);
/* Demotion ignores all cpuset and mempolicy settings */
migrate_pages(demote_folios, alloc_demote_page, NULL,
migrate_pages(demote_folios, alloc_demote_folio, NULL,
(unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
&nr_succeeded);
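
For reference, the mtc used by demote_folio_list() above is initialized (in
current kernels, roughly) so that the target allocation cannot recurse into
reclaim:

    struct migration_target_control mtc = {
            /* fail fast and quietly: we are already inside reclaim */
            .gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
                        __GFP_NOMEMALLOC | GFP_NOWAIT,
            .nid = target_nid,
            .nmask = &allowed_mask,
    };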