mm/migrate: Add folio_migrate_flags()

Turn migrate_page_states() into a wrapper around folio_migrate_flags().
Also convert two functions only called from folio_migrate_flags() to
be folio-based.  ksm_migrate_page() becomes folio_migrate_ksm() and
copy_page_owner() becomes folio_copy_owner().  folio_migrate_flags()
alone shrinks by two thirds -- 1967 bytes down to 642 bytes.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Author: Matthew Wilcox (Oracle)
Date:   2021-05-07 15:26:29 -04:00
parent 3417013e0d
commit 19138349ed
7 changed files with 77 additions and 67 deletions
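For context, a minimal sketch (plain C, not part of the patch) of what the conversion means for a caller: the page-based entry point survives as a compat wrapper, while new code converts once with page_folio() and then stays folio-based. The helpers example_old_style() and example_new_style() below are hypothetical; only migrate_page_states(), folio_migrate_flags() and page_folio() are real interfaces touched here.

#include <linux/migrate.h>
#include <linux/mm.h>

/* Hypothetical caller, old style: the page-based interface survives as the wrapper added below. */
static void example_old_style(struct page *newpage, struct page *page)
{
	migrate_page_states(newpage, page);
}

/* Hypothetical caller, new style: convert the pages once, then use the folio API directly. */
static void example_new_style(struct page *newpage, struct page *page)
{
	struct folio *newfolio = page_folio(newpage);
	struct folio *folio = page_folio(page);

	folio_migrate_flags(newfolio, folio);
}

Note that the folio variants consistently take the destination first: folio_copy_owner(newfolio, old) flips the argument order of copy_page_owner(oldpage, newpage), while folio_migrate_ksm(newfolio, folio) keeps the order that ksm_migrate_page(newpage, oldpage) already used.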

include/linux/ksm.h

@@ -52,7 +52,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
 			struct vm_area_struct *vma, unsigned long address);
 
 void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
-void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
 
 #else  /* !CONFIG_KSM */
@@ -83,7 +83,7 @@ static inline void rmap_walk_ksm(struct page *page,
 {
 }
 
-static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old)
 {
 }
 #endif /* CONFIG_MMU */

include/linux/migrate.h

@@ -57,6 +57,7 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 				  struct page *newpage, struct page *page);
 extern int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page, int extra_count);
+void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
 int folio_migrate_mapping(struct address_space *mapping,
 		struct folio *newfolio, struct folio *folio, int extra_count);
 #else

include/linux/page_owner.h

@@ -12,7 +12,7 @@ extern void __reset_page_owner(struct page *page, unsigned int order);
 extern void __set_page_owner(struct page *page,
 			unsigned int order, gfp_t gfp_mask);
 extern void __split_page_owner(struct page *page, unsigned int nr);
-extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
+extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(const struct page *page);
 extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -36,10 +36,10 @@ static inline void split_page_owner(struct page *page, unsigned int nr)
 	if (static_branch_unlikely(&page_owner_inited))
 		__split_page_owner(page, nr);
 }
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
 {
 	if (static_branch_unlikely(&page_owner_inited))
-		__copy_page_owner(oldpage, newpage);
+		__folio_copy_owner(newfolio, old);
 }
 static inline void set_page_owner_migrate_reason(struct page *page, int reason)
 {
@@ -63,7 +63,7 @@ static inline void split_page_owner(struct page *page,
 						unsigned int order)
 {
 }
-static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
+static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
 {
 }
 static inline void set_page_owner_migrate_reason(struct page *page, int reason)

mm/folio-compat.c

@@ -58,4 +58,10 @@ int migrate_page_move_mapping(struct address_space *mapping,
 			page_folio(page), extra_count);
 }
 EXPORT_SYMBOL(migrate_page_move_mapping);
+
+void migrate_page_states(struct page *newpage, struct page *page)
+{
+	folio_migrate_flags(page_folio(newpage), page_folio(page));
+}
+EXPORT_SYMBOL(migrate_page_states);
 #endif

mm/ksm.c

@@ -751,7 +751,7 @@ stale:
 	/*
 	 * We come here from above when page->mapping or !PageSwapCache
 	 * suggests that the node is stale; but it might be under migration.
-	 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
+	 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
 	 * before checking whether node->kpfn has been changed.
 	 */
 	smp_rmb();
@@ -852,9 +852,14 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma,
 	return err;
 }
 
+static inline struct stable_node *folio_stable_node(struct folio *folio)
+{
+	return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
+}
+
 static inline struct stable_node *page_stable_node(struct page *page)
 {
-	return PageKsm(page) ? page_rmapping(page) : NULL;
+	return folio_stable_node(page_folio(page));
 }
 
 static inline void set_page_stable_node(struct page *page,
@@ -2659,26 +2664,26 @@ again:
 }
 
 #ifdef CONFIG_MIGRATION
-void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
 {
 	struct stable_node *stable_node;
 
-	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
-	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
-	VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
+	VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);
 
-	stable_node = page_stable_node(newpage);
+	stable_node = folio_stable_node(folio);
 	if (stable_node) {
-		VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
-		stable_node->kpfn = page_to_pfn(newpage);
+		VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
+		stable_node->kpfn = folio_pfn(newfolio);
 		/*
-		 * newpage->mapping was set in advance; now we need smp_wmb()
+		 * newfolio->mapping was set in advance; now we need smp_wmb()
 		 * to make sure that the new stable_node->kpfn is visible
-		 * to get_ksm_page() before it can see that oldpage->mapping
-		 * has gone stale (or that PageSwapCache has been cleared).
+		 * to get_ksm_page() before it can see that folio->mapping
+		 * has gone stale (or that folio_test_swapcache has been cleared).
 		 */
 		smp_wmb();
-		set_page_stable_node(oldpage, NULL);
+		set_page_stable_node(&folio->page, NULL);
 	}
 }
 #endif /* CONFIG_MIGRATION */
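The smp_wmb() in the hunk above pairs with the smp_rmb() in get_ksm_page() (see the earlier mm/ksm.c comment hunk). As a schematic only, using placeholder names (example_stable_node, example_publish, example_recheck) rather than the real ksm structures, the contract is: publish the new kpfn, then make the old folio look stale; a reader that has observed the staleness issues smp_rmb() before re-reading kpfn.

#include <linux/compiler.h>
#include <asm/barrier.h>

struct example_stable_node {
	unsigned long kpfn;	/* stands in for stable_node->kpfn */
	void *old_mapping;	/* stands in for the old folio's mapping link */
};

/* Writer side, as folio_migrate_ksm() does it: new pfn first, stale marker second. */
static void example_publish(struct example_stable_node *node, unsigned long new_kpfn)
{
	WRITE_ONCE(node->kpfn, new_kpfn);	/* step 1: new location becomes visible */
	smp_wmb();				/* pairs with smp_rmb() in example_recheck() */
	WRITE_ONCE(node->old_mapping, NULL);	/* step 2: old lookup path now looks stale */
}

/* Reader side, as get_ksm_page() does it: called only after observing step 2. */
static unsigned long example_recheck(struct example_stable_node *node)
{
	smp_rmb();			/* order the stale observation before the re-read */
	return READ_ONCE(node->kpfn);	/* guaranteed to see the value from step 1 */
}

In the patch itself, step 2 corresponds to set_page_stable_node(&folio->page, NULL).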

mm/migrate.c

@@ -539,82 +539,80 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 }
 
 /*
- * Copy the page to its new location
+ * Copy the flags and some other ancillary information
  */
-void migrate_page_states(struct page *newpage, struct page *page)
+void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-	struct folio *newfolio = page_folio(newpage);
 	int cpupid;
 
-	if (PageError(page))
-		SetPageError(newpage);
-	if (PageReferenced(page))
-		SetPageReferenced(newpage);
-	if (PageUptodate(page))
-		SetPageUptodate(newpage);
-	if (TestClearPageActive(page)) {
-		VM_BUG_ON_PAGE(PageUnevictable(page), page);
-		SetPageActive(newpage);
-	} else if (TestClearPageUnevictable(page))
-		SetPageUnevictable(newpage);
-	if (PageWorkingset(page))
-		SetPageWorkingset(newpage);
-	if (PageChecked(page))
-		SetPageChecked(newpage);
-	if (PageMappedToDisk(page))
-		SetPageMappedToDisk(newpage);
+	if (folio_test_error(folio))
+		folio_set_error(newfolio);
+	if (folio_test_referenced(folio))
+		folio_set_referenced(newfolio);
+	if (folio_test_uptodate(folio))
+		folio_mark_uptodate(newfolio);
+	if (folio_test_clear_active(folio)) {
+		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
+		folio_set_active(newfolio);
+	} else if (folio_test_clear_unevictable(folio))
+		folio_set_unevictable(newfolio);
+	if (folio_test_workingset(folio))
+		folio_set_workingset(newfolio);
+	if (folio_test_checked(folio))
+		folio_set_checked(newfolio);
+	if (folio_test_mappedtodisk(folio))
+		folio_set_mappedtodisk(newfolio);
 
 	/* Move dirty on pages not done by folio_migrate_mapping() */
-	if (PageDirty(page))
-		SetPageDirty(newpage);
+	if (folio_test_dirty(folio))
+		folio_set_dirty(newfolio);
 
-	if (page_is_young(page))
-		set_page_young(newpage);
-	if (page_is_idle(page))
-		set_page_idle(newpage);
+	if (folio_test_young(folio))
+		folio_set_young(newfolio);
+	if (folio_test_idle(folio))
+		folio_set_idle(newfolio);
 
 	/*
 	 * Copy NUMA information to the new page, to prevent over-eager
 	 * future migrations of this same page.
 	 */
-	cpupid = page_cpupid_xchg_last(page, -1);
-	page_cpupid_xchg_last(newpage, cpupid);
+	cpupid = page_cpupid_xchg_last(&folio->page, -1);
+	page_cpupid_xchg_last(&newfolio->page, cpupid);
 
-	ksm_migrate_page(newpage, page);
+	folio_migrate_ksm(newfolio, folio);
 	/*
 	 * Please do not reorder this without considering how mm/ksm.c's
 	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
 	 */
-	if (PageSwapCache(page))
-		ClearPageSwapCache(page);
-	ClearPagePrivate(page);
+	if (folio_test_swapcache(folio))
+		folio_clear_swapcache(folio);
+	folio_clear_private(folio);
 
 	/* page->private contains hugetlb specific flags */
-	if (!PageHuge(page))
-		set_page_private(page, 0);
+	if (!folio_test_hugetlb(folio))
+		folio->private = NULL;
 
 	/*
 	 * If any waiters have accumulated on the new page then
 	 * wake them up.
 	 */
-	if (PageWriteback(newpage))
-		end_page_writeback(newpage);
+	if (folio_test_writeback(newfolio))
+		folio_end_writeback(newfolio);
 
 	/*
 	 * PG_readahead shares the same bit with PG_reclaim. The above
 	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
 	 * bit after that.
 	 */
-	if (PageReadahead(page))
-		SetPageReadahead(newpage);
+	if (folio_test_readahead(folio))
+		folio_set_readahead(newfolio);
 
-	copy_page_owner(page, newpage);
+	folio_copy_owner(newfolio, folio);
 
-	if (!PageHuge(page))
+	if (!folio_test_hugetlb(folio))
 		mem_cgroup_migrate(folio, newfolio);
 }
-EXPORT_SYMBOL(migrate_page_states);
+EXPORT_SYMBOL(folio_migrate_flags);
 
 void migrate_page_copy(struct page *newpage, struct page *page)
 {
@@ -655,7 +653,7 @@ int migrate_page(struct address_space *mapping,
 	if (mode != MIGRATE_SYNC_NO_COPY)
 		migrate_page_copy(newpage, page);
 	else
-		migrate_page_states(newpage, page);
+		folio_migrate_flags(newfolio, folio);
 	return MIGRATEPAGE_SUCCESS;
 }
 EXPORT_SYMBOL(migrate_page);

mm/page_owner.c

@@ -210,10 +210,10 @@ void __split_page_owner(struct page *page, unsigned int nr)
 	}
 }
 
-void __copy_page_owner(struct page *oldpage, struct page *newpage)
+void __folio_copy_owner(struct folio *newfolio, struct folio *old)
 {
-	struct page_ext *old_ext = lookup_page_ext(oldpage);
-	struct page_ext *new_ext = lookup_page_ext(newpage);
+	struct page_ext *old_ext = lookup_page_ext(&old->page);
+	struct page_ext *new_ext = lookup_page_ext(&newfolio->page);
 	struct page_owner *old_page_owner, *new_page_owner;
 
 	if (unlikely(!old_ext || !new_ext))
@@ -231,11 +231,11 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
 	new_page_owner->free_ts_nsec = old_page_owner->ts_nsec;
 
 	/*
-	 * We don't clear the bit on the oldpage as it's going to be freed
+	 * We don't clear the bit on the old folio as it's going to be freed
 	 * after migration. Until then, the info can be useful in case of
 	 * a bug, and the overall stats will be off a bit only temporarily.
 	 * Also, migrate_misplaced_transhuge_page() can still fail the
-	 * migration and then we want the oldpage to retain the info. But
+	 * migration and then we want the old folio to retain the info. But
 	 * in that case we also don't need to explicitly clear the info from
 	 * the new page, which will be freed.
 	 */