hugetlb: Convert to migrate_folio

This involves converting migrate_huge_page_move_mapping().  We also need a
folio variant of hugetlb_set_page_subpool(), but that's for a later patch.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Author: Matthew Wilcox (Oracle)
Date:   2022-06-06 10:47:21 -04:00
Parent: 3648951ceb
Commit: b890ec2a2c
3 changed files with 26 additions and 21 deletions

View File

@@ -954,28 +954,33 @@ static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
 	return error;
 }
 
-static int hugetlbfs_migrate_page(struct address_space *mapping,
-				struct page *newpage, struct page *page,
+#ifdef CONFIG_MIGRATION
+static int hugetlbfs_migrate_folio(struct address_space *mapping,
+				struct folio *dst, struct folio *src,
 				enum migrate_mode mode)
 {
 	int rc;
 
-	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+	rc = migrate_huge_page_move_mapping(mapping, dst, src);
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;
 
-	if (hugetlb_page_subpool(page)) {
-		hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
-		hugetlb_set_page_subpool(page, NULL);
+	if (hugetlb_page_subpool(&src->page)) {
+		hugetlb_set_page_subpool(&dst->page,
+					hugetlb_page_subpool(&src->page));
+		hugetlb_set_page_subpool(&src->page, NULL);
 	}
 
 	if (mode != MIGRATE_SYNC_NO_COPY)
-		migrate_page_copy(newpage, page);
+		folio_migrate_copy(dst, src);
 	else
-		migrate_page_states(newpage, page);
+		folio_migrate_flags(dst, src);
 
 	return MIGRATEPAGE_SUCCESS;
 }
+#else
+#define hugetlbfs_migrate_folio NULL
+#endif
 
 static int hugetlbfs_error_remove_page(struct address_space *mapping,
 				struct page *page)
@@ -1142,7 +1147,7 @@ static const struct address_space_operations hugetlbfs_aops = {
 	.write_begin		= hugetlbfs_write_begin,
 	.write_end		= hugetlbfs_write_end,
 	.dirty_folio		= noop_dirty_folio,
-	.migratepage		= hugetlbfs_migrate_page,
+	.migrate_folio		= hugetlbfs_migrate_folio,
 	.error_remove_page	= hugetlbfs_error_remove_page,
 };

View File

@@ -72,8 +72,8 @@ extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
 
 extern void migrate_page_states(struct page *newpage, struct page *page);
 extern void migrate_page_copy(struct page *newpage, struct page *page);
-extern int migrate_huge_page_move_mapping(struct address_space *mapping,
-		struct page *newpage, struct page *page);
+int migrate_huge_page_move_mapping(struct address_space *mapping,
+		struct folio *dst, struct folio *src);
 extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page, int extra_count);
 void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
@@ -104,7 +104,7 @@ static inline void migrate_page_copy(struct page *newpage,
 					struct page *page) {}
 static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
-				  struct page *newpage, struct page *page)
+				  struct folio *dst, struct folio *src)
 {
 	return -ENOSYS;
 }

View File

@@ -474,26 +474,26 @@ EXPORT_SYMBOL(folio_migrate_mapping);
  * of folio_migrate_mapping().
  */
 int migrate_huge_page_move_mapping(struct address_space *mapping,
-				   struct page *newpage, struct page *page)
+				   struct folio *dst, struct folio *src)
 {
-	XA_STATE(xas, &mapping->i_pages, page_index(page));
+	XA_STATE(xas, &mapping->i_pages, folio_index(src));
 	int expected_count;
 
 	xas_lock_irq(&xas);
-	expected_count = 2 + page_has_private(page);
-	if (!page_ref_freeze(page, expected_count)) {
+	expected_count = 2 + folio_has_private(src);
+	if (!folio_ref_freeze(src, expected_count)) {
 		xas_unlock_irq(&xas);
 		return -EAGAIN;
 	}
 
-	newpage->index = page->index;
-	newpage->mapping = page->mapping;
+	dst->index = src->index;
+	dst->mapping = src->mapping;
 
-	get_page(newpage);
+	folio_get(dst);
 
-	xas_store(&xas, newpage);
+	xas_store(&xas, dst);
 
-	page_ref_unfreeze(page, expected_count - 1);
+	folio_ref_unfreeze(src, expected_count - 1);
 
 	xas_unlock_irq(&xas);