migrate_pages: move THP/hugetlb migration support check to simplify code

This is a code cleanup patch, no functionality change is expected.  After
the change, the number of lines of code is reduced, especially in the long
migrate_pages_batch().

Link: https://lkml.kernel.org/r/20230213123444.155149-10-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Suggested-by: Alistair Popple <apopple@nvidia.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Bharata B Rao <bharata@amd.com>
Cc: Xin Hao <xhao@linux.alibaba.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Huang Ying 2023-02-13 20:34:44 +08:00 committed by Andrew Morton
parent 7e12beb8ca
commit 6f7d760e86

View file

@@ -1117,9 +1117,6 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page
bool locked = false;
bool dst_locked = false;
if (!thp_migration_supported() && folio_test_transhuge(src))
return -ENOSYS;
if (folio_ref_count(src) == 1) {
/* Folio was freed from under us. So we are done. */
folio_clear_active(src);
@@ -1380,16 +1377,6 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
/*
* Migratability of hugepages depends on architectures and their size.
* This check is necessary because some callers of hugepage migration
* like soft offline and memory hotremove don't walk through page
* tables or check whether the hugepage is pmd-based or not before
* kicking migration.
*/
if (!hugepage_migration_supported(page_hstate(hpage)))
return -ENOSYS;
if (folio_ref_count(src) == 1) {
/* page was freed from under us. So we are done. */
folio_putback_active_hugetlb(src);
@@ -1556,6 +1543,20 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
cond_resched();
/*
* Migratability of hugepages depends on architectures and
* their size. This check is necessary because some callers
* of hugepage migration like soft offline and memory
* hotremove don't walk through page tables or check whether
* the hugepage is pmd-based or not before kicking migration.
*/
if (!hugepage_migration_supported(folio_hstate(folio))) {
nr_failed++;
stats->nr_failed_pages += nr_pages;
list_move_tail(&folio->lru, ret_folios);
continue;
}
rc = unmap_and_move_huge_page(get_new_page,
put_new_page, private,
&folio->page, pass > 2, mode,
@@ -1565,16 +1566,9 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
* Success: hugetlb folio will be put back
* -EAGAIN: stay on the from list
* -ENOMEM: stay on the from list
* -ENOSYS: stay on the from list
* Other errno: put on ret_folios list
*/
switch(rc) {
case -ENOSYS:
/* Hugetlb migration is unsupported */
nr_failed++;
stats->nr_failed_pages += nr_pages;
list_move_tail(&folio->lru, ret_folios);
break;
case -ENOMEM:
/*
* When memory is low, don't bother to try to migrate
@@ -1664,6 +1658,28 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
cond_resched();
/*
* Large folio migration might be unsupported or
* the allocation might be failed so we should retry
* on the same folio with the large folio split
* to normal folios.
*
* Split folios are put in split_folios, and
* we will migrate them after the rest of the
* list is processed.
*/
if (!thp_migration_supported() && is_thp) {
nr_large_failed++;
stats->nr_thp_failed++;
if (!try_split_folio(folio, &split_folios)) {
stats->nr_thp_split++;
continue;
}
stats->nr_failed_pages += nr_pages;
list_move_tail(&folio->lru, ret_folios);
continue;
}
rc = migrate_folio_unmap(get_new_page, put_new_page, private,
folio, &dst, pass > 2, avoid_force_lock,
mode, reason, ret_folios);
@@ -1675,36 +1691,9 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
* -EAGAIN: stay on the from list
* -EDEADLOCK: stay on the from list
* -ENOMEM: stay on the from list
* -ENOSYS: stay on the from list
* Other errno: put on ret_folios list
*/
switch(rc) {
/*
* Large folio migration might be unsupported or
* the allocation could've failed so we should retry
* on the same folio with the large folio split
* to normal folios.
*
* Split folios are put in split_folios, and
* we will migrate them after the rest of the
* list is processed.
*/
case -ENOSYS:
/* Large folio migration is unsupported */
if (is_large) {
nr_large_failed++;
stats->nr_thp_failed += is_thp;
if (!try_split_folio(folio, &split_folios)) {
stats->nr_thp_split += is_thp;
break;
}
} else if (!no_split_folio_counting) {
nr_failed++;
}
stats->nr_failed_pages += nr_pages;
list_move_tail(&folio->lru, ret_folios);
break;
case -ENOMEM:
/*
* When memory is low, don't bother to try to migrate