mm: migrate: add isolate_folio_to_list()
Add the isolate_folio_to_list() helper to try to isolate HugeTLB, non-LRU movable and LRU folios to a list; it will be reused by do_migrate_range() from memory hotplug soon. Also drop mf_isolate_folio(), since soft_offline_in_use_page() can use the new helper directly.

Link: https://lkml.kernel.org/r/20240827114728.3212578-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Tested-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Carpenter <dan.carpenter@linaro.org>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e8a796fa1c
commit f1264e9531
3 changed files with 42 additions and 35 deletions
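For context before the diff: the commit message says the helper will soon be reused by do_migrate_range() in the memory-hotplug path. A rough sketch of what such a caller could look like follows; the function name, loop structure and error handling are illustrative assumptions, not part of this commit.

/*
 * Hypothetical sketch only: how a do_migrate_range()-style caller
 * might reuse isolate_folio_to_list().  Not part of this commit.
 */
static void migrate_range_sketch(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;
        LIST_HEAD(source);

        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                struct folio *folio = page_folio(pfn_to_page(pfn));

                /* Take our own reference before trying to isolate. */
                if (!folio_try_get(folio))
                        continue;

                /* One call covers HugeTLB, LRU and non-LRU movable folios. */
                if (!isolate_folio_to_list(folio, &source))
                        pr_warn("failed to isolate pfn %lx\n", pfn);

                /* On success the list holds its own reference on the folio. */
                folio_put(folio);
        }

        /* migrate_pages(&source, ...) would follow here. */
}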
include/linux/migrate.h

@@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
                   unsigned int *ret_succeeded);
 struct folio *alloc_migration_target(struct folio *src, unsigned long private);
 bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
 
 int migrate_huge_page_move_mapping(struct address_space *mapping,
                 struct folio *dst, struct folio *src);
@@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src,
         { return NULL; }
 static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
         { return false; }
+static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+        { return false; }
 
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
                 struct folio *dst, struct folio *src)
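The second hunk extends the stub block used when migration is compiled out, so callers of the new helper build without #ifdef guards. A minimal sketch of why that matters, assuming a caller that already has a folio in hand; migrate_isolated_list() is a made-up placeholder, not a real kernel function.

        LIST_HEAD(list);

        /* No #ifdef CONFIG_MIGRATION needed: the stub simply returns false. */
        if (isolate_folio_to_list(folio, &list))
                migrate_isolated_list(&list);   /* hypothetical follow-up step */
        else
                pr_debug("folio not isolated (or migration compiled out)\n");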
mm/memory-failure.c

@@ -2653,40 +2653,6 @@ EXPORT_SYMBOL(unpoison_memory);
 #undef pr_fmt
 #define pr_fmt(fmt) "Soft offline: " fmt
 
-static bool mf_isolate_folio(struct folio *folio, struct list_head *pagelist)
-{
-        bool isolated = false;
-
-        if (folio_test_hugetlb(folio)) {
-                isolated = isolate_hugetlb(folio, pagelist);
-        } else {
-                bool lru = !__folio_test_movable(folio);
-
-                if (lru)
-                        isolated = folio_isolate_lru(folio);
-                else
-                        isolated = isolate_movable_page(&folio->page,
-                                                        ISOLATE_UNEVICTABLE);
-
-                if (isolated) {
-                        list_add(&folio->lru, pagelist);
-                        if (lru)
-                                node_stat_add_folio(folio, NR_ISOLATED_ANON +
-                                                    folio_is_file_lru(folio));
-                }
-        }
-
-        /*
-         * If we succeed to isolate the folio, we grabbed another refcount on
-         * the folio, so we can safely drop the one we got from get_any_page().
-         * If we failed to isolate the folio, it means that we cannot go further
-         * and we will return an error, so drop the reference we got from
-         * get_any_page() as well.
-         */
-        folio_put(folio);
-        return isolated;
-}
-
 /*
  * soft_offline_in_use_page handles hugetlb-pages and non-hugetlb pages.
  * If the page is a non-dirty unmapped page-cache page, it simply invalidates.
@@ -2699,6 +2665,7 @@ static int soft_offline_in_use_page(struct page *page)
         struct folio *folio = page_folio(page);
         char const *msg_page[] = {"page", "hugepage"};
         bool huge = folio_test_hugetlb(folio);
+        bool isolated;
         LIST_HEAD(pagelist);
         struct migration_target_control mtc = {
                 .nid = NUMA_NO_NODE,
@@ -2738,7 +2705,18 @@ static int soft_offline_in_use_page(struct page *page)
                 return 0;
         }
 
-        if (mf_isolate_folio(folio, &pagelist)) {
+        isolated = isolate_folio_to_list(folio, &pagelist);
+
+        /*
+         * If we succeed to isolate the folio, we grabbed another refcount on
+         * the folio, so we can safely drop the one we got from get_any_page().
+         * If we failed to isolate the folio, it means that we cannot go further
+         * and we will return an error, so drop the reference we got from
+         * get_any_page() as well.
+         */
+        folio_put(folio);
+
+        if (isolated) {
                 ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
                         (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_FAILURE, NULL);
                 if (!ret) {
mm/migrate.c
@@ -178,6 +178,32 @@ void putback_movable_pages(struct list_head *l)
         }
 }
 
+/* Must be called with an elevated refcount on the non-hugetlb folio */
+bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
+{
+        bool isolated, lru;
+
+        if (folio_test_hugetlb(folio))
+                return isolate_hugetlb(folio, list);
+
+        lru = !__folio_test_movable(folio);
+        if (lru)
+                isolated = folio_isolate_lru(folio);
+        else
+                isolated = isolate_movable_page(&folio->page,
+                                                ISOLATE_UNEVICTABLE);
+
+        if (!isolated)
+                return false;
+
+        list_add(&folio->lru, list);
+        if (lru)
+                node_stat_add_folio(folio, NR_ISOLATED_ANON +
+                                    folio_is_file_lru(folio));
+
+        return true;
+}
+
 /*
  * Restore a potential migration pte to a working pte entry
  */
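A note on the helper's contract, going by the "elevated refcount" comment above and the soft-offline hunk: the caller must already hold a reference, and a successful isolation takes an extra one via the list, so the caller can drop its own either way. A minimal sketch of that pattern; isolate_one_sketch() is an assumed wrapper name, not code from this commit.

static int isolate_one_sketch(struct folio *folio, struct list_head *list)
{
        /* Assumes a reference was taken earlier (e.g. via get_any_page()). */
        bool isolated = isolate_folio_to_list(folio, list);

        /*
         * On success the folio is now pinned through @list, and on failure
         * we stop anyway, so the reference we entered with can be dropped
         * unconditionally, mirroring soft_offline_in_use_page() above.
         */
        folio_put(folio);

        return isolated ? 0 : -EBUSY;
}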