mm/rmap: Convert rmap_walk() to take a folio

This ripples all the way through to every calling and called function
from rmap.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date:   2022-01-29 16:06:53 -05:00
Commit: 2f031c6f04
Parent: e05b34539d
9 changed files with 80 additions and 99 deletions
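
For orientation, a minimal sketch (not part of this commit's diff) of what a caller looks like once this conversion lands, modeled on the mm/damon/paddr.c hunks below: the rmap_one callback and the anon_lock hook now take a struct folio, and rmap_walk() is handed the folio itself rather than &folio->page. The example_* names are invented for illustration, and the folio locking that damon_pa_mkold() performs around the walk (folio_trylock()/folio_unlock() for non-anon, non-KSM folios) is omitted here.

/* Hypothetical callback: invoked once per VMA that maps the folio. */
static bool example_mkold_one(struct folio *folio, struct vm_area_struct *vma,
                unsigned long addr, void *arg)
{
        DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);

        while (page_vma_mapped_walk(&pvmw)) {
                /* Per-PTE work would go here, e.g. clearing the young bit. */
                addr = pvmw.address;
        }

        return true;    /* true means: keep walking the remaining VMAs */
}

/* Hypothetical caller: the folio, not a page, is passed to rmap_walk(). */
static void example_walk(struct folio *folio)
{
        struct rmap_walk_control rwc = {
                .rmap_one = example_mkold_one,
                .anon_lock = folio_lock_anon_vma_read,
        };

        rmap_walk(folio, &rwc);
}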

include/linux/ksm.h

@@ -51,7 +51,7 @@ static inline void ksm_exit(struct mm_struct *mm)
struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
#else /* !CONFIG_KSM */
@@ -78,7 +78,7 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
return page;
}
static inline void rmap_walk_ksm(struct page *page,
static inline void rmap_walk_ksm(struct folio *folio,
struct rmap_walk_control *rwc)
{
}

include/linux/rmap.h

@@ -266,7 +266,6 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
/*
* Called by memory-failure.c to kill processes.
*/
struct anon_vma *page_lock_anon_vma_read(struct page *page);
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
@@ -286,15 +285,15 @@ struct rmap_walk_control {
* Return false if page table scanning in rmap_walk should be stopped.
* Otherwise, return true.
*/
bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
struct anon_vma *(*anon_lock)(struct page *page);
int (*done)(struct folio *folio);
struct anon_vma *(*anon_lock)(struct folio *folio);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */

mm/damon/paddr.c

@@ -16,10 +16,10 @@
#include "../internal.h"
#include "prmtv-common.h"
static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
@@ -37,7 +37,7 @@ static void damon_pa_mkold(unsigned long paddr)
struct page *page = damon_get_page(PHYS_PFN(paddr));
struct rmap_walk_control rwc = {
.rmap_one = __damon_pa_mkold,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
@@ -54,7 +54,7 @@ static void damon_pa_mkold(unsigned long paddr)
if (need_lock && !folio_trylock(folio))
goto out;
rmap_walk(&folio->page, &rwc);
rmap_walk(folio, &rwc);
if (need_lock)
folio_unlock(folio);
@@ -87,10 +87,9 @@ struct damon_pa_access_chk_result {
bool accessed;
};
static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
struct folio *folio = page_folio(page);
struct damon_pa_access_chk_result *result = arg;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
@@ -133,7 +132,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
struct rmap_walk_control rwc = {
.arg = &result,
.rmap_one = __damon_pa_young,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
@@ -156,7 +155,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
return NULL;
}
rmap_walk(&folio->page, &rwc);
rmap_walk(folio, &rwc);
if (need_lock)
folio_unlock(folio);

mm/folio-compat.c

@@ -164,10 +164,3 @@ void putback_lru_page(struct page *page)
{
folio_putback_lru(page_folio(page));
}
#ifdef CONFIG_MMU
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
return folio_lock_anon_vma_read(page_folio(page));
}
#endif

mm/huge_memory.c

@@ -2572,7 +2572,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
* The caller does not necessarily hold an mmap_lock that would
* prevent the anon_vma disappearing so we first we take a
* reference to it and then lock the anon_vma for write. This
* is similar to page_lock_anon_vma_read except the write lock
* is similar to folio_lock_anon_vma_read except the write lock
* is taken to serialise against parallel split or collapse
* operations.
*/

mm/ksm.c

@@ -2588,21 +2588,21 @@ struct page *ksm_might_need_to_copy(struct page *page,
return new_page;
}
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
{
struct stable_node *stable_node;
struct rmap_item *rmap_item;
int search_new_forks = 0;
VM_BUG_ON_PAGE(!PageKsm(page), page);
VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);
/*
* Rely on the page lock to protect against concurrent modifications
* to that page's node of the stable tree.
*/
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
stable_node = page_stable_node(page);
stable_node = folio_stable_node(folio);
if (!stable_node)
return;
again:
@@ -2637,11 +2637,11 @@ again:
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
anon_vma_unlock_read(anon_vma);
return;
}
if (rwc->done && rwc->done(page)) {
if (rwc->done && rwc->done(folio)) {
anon_vma_unlock_read(anon_vma);
return;
}

mm/migrate.c

@@ -171,13 +171,11 @@ void putback_movable_pages(struct list_head *l)
/*
* Restore a potential migration pte to a working pte entry
*/
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *old)
static bool remove_migration_pte(struct folio *folio,
struct vm_area_struct *vma, unsigned long addr, void *old)
{
struct folio *folio = page_folio(page);
DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
VM_BUG_ON_PAGE(PageTail(page), page);
while (page_vma_mapped_walk(&pvmw)) {
pte_t pte;
swp_entry_t entry;
@@ -269,9 +267,9 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
};
if (locked)
rmap_walk_locked(&dst->page, &rwc);
rmap_walk_locked(dst, &rwc);
else
rmap_walk(&dst->page, &rwc);
rmap_walk(dst, &rwc);
}
/*

mm/page_idle.c

@@ -46,11 +46,10 @@ static struct page *page_idle_get_page(unsigned long pfn)
return page;
}
static bool page_idle_clear_pte_refs_one(struct page *page,
static bool page_idle_clear_pte_refs_one(struct folio *folio,
struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
struct folio *folio = page_folio(page);
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
bool referenced = false;
@@ -93,7 +92,7 @@ static void page_idle_clear_pte_refs(struct page *page)
*/
static const struct rmap_walk_control rwc = {
.rmap_one = page_idle_clear_pte_refs_one,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
@@ -104,7 +103,7 @@ static void page_idle_clear_pte_refs(struct page *page)
if (need_lock && !folio_trylock(folio))
return;
rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc);
rmap_walk(folio, (struct rmap_walk_control *)&rwc);
if (need_lock)
folio_unlock(folio);

mm/rmap.c

@@ -107,15 +107,15 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
VM_BUG_ON(atomic_read(&anon_vma->refcount));
/*
* Synchronize against page_lock_anon_vma_read() such that
* Synchronize against folio_lock_anon_vma_read() such that
* we can safely hold the lock without the anon_vma getting
* freed.
*
* Relies on the full mb implied by the atomic_dec_and_test() from
* put_anon_vma() against the acquire barrier implied by
* down_read_trylock() from page_lock_anon_vma_read(). This orders:
* down_read_trylock() from folio_lock_anon_vma_read(). This orders:
*
* page_lock_anon_vma_read() VS put_anon_vma()
* folio_lock_anon_vma_read() VS put_anon_vma()
* down_read_trylock() atomic_dec_and_test()
* LOCK MB
* atomic_read() rwsem_is_locked()
@@ -168,7 +168,7 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
* allocate a new one.
*
* Anon-vma allocations are very subtle, because we may have
* optimistically looked up an anon_vma in page_lock_anon_vma_read()
* optimistically looked up an anon_vma in folio_lock_anon_vma_read()
* and that may actually touch the rwsem even in the newly
* allocated vma (it depends on RCU to make sure that the
* anon_vma isn't actually destroyed).
@@ -799,10 +799,9 @@ struct folio_referenced_arg {
/*
* arg: folio_referenced_arg will be passed
*/
static bool folio_referenced_one(struct page *page, struct vm_area_struct *vma,
unsigned long address, void *arg)
static bool folio_referenced_one(struct folio *folio,
struct vm_area_struct *vma, unsigned long address, void *arg)
{
struct folio *folio = page_folio(page);
struct folio_referenced_arg *pra = arg;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
int referenced = 0;
@@ -894,7 +893,7 @@ int folio_referenced(struct folio *folio, int is_locked,
struct rmap_walk_control rwc = {
.rmap_one = folio_referenced_one,
.arg = (void *)&pra,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
*vm_flags = 0;
@@ -919,7 +918,7 @@ int folio_referenced(struct folio *folio, int is_locked,
rwc.invalid_vma = invalid_folio_referenced_vma;
}
rmap_walk(&folio->page, &rwc);
rmap_walk(folio, &rwc);
*vm_flags = pra.vm_flags;
if (we_locked)
@@ -928,10 +927,9 @@ int folio_referenced(struct folio *folio, int is_locked,
return pra.referenced;
}
static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
static bool page_mkclean_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct folio *folio = page_folio(page);
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, PVMW_SYNC);
struct mmu_notifier_range range;
int *cleaned = arg;
@@ -1025,7 +1023,7 @@ int folio_mkclean(struct folio *folio)
if (!mapping)
return 0;
rmap_walk(&folio->page, &rwc);
rmap_walk(folio, &rwc);
return cleaned;
}
@@ -1410,10 +1408,9 @@ out:
/*
* @arg: enum ttu_flags will be passed to this argument
*/
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct folio *folio = page_folio(page);
struct mm_struct *mm = vma->vm_mm;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
pte_t pteval;
@@ -1667,9 +1664,9 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
return vma_is_temporary_stack(vma);
}
static int page_not_mapped(struct page *page)
static int page_not_mapped(struct folio *folio)
{
return !page_mapped(page);
return !folio_mapped(folio);
}
/**
@@ -1689,13 +1686,13 @@ void try_to_unmap(struct folio *folio, enum ttu_flags flags)
.rmap_one = try_to_unmap_one,
.arg = (void *)flags,
.done = page_not_mapped,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
if (flags & TTU_RMAP_LOCKED)
rmap_walk_locked(&folio->page, &rwc);
rmap_walk_locked(folio, &rwc);
else
rmap_walk(&folio->page, &rwc);
rmap_walk(folio, &rwc);
}
/*
@@ -1704,10 +1701,9 @@ void try_to_unmap(struct folio *folio, enum ttu_flags flags)
* If TTU_SPLIT_HUGE_PMD is specified any PMD mappings will be split into PTEs
* containing migration entries.
*/
static bool try_to_migrate_one(struct page *page, struct vm_area_struct *vma,
static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, void *arg)
{
struct folio *folio = page_folio(page);
struct mm_struct *mm = vma->vm_mm;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
pte_t pteval;
@@ -1951,7 +1947,7 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
.rmap_one = try_to_migrate_one,
.arg = (void *)flags,
.done = page_not_mapped,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
/*
@@ -1977,9 +1973,9 @@ void try_to_migrate(struct folio *folio, enum ttu_flags flags)
rwc.invalid_vma = invalid_migration_vma;
if (flags & TTU_RMAP_LOCKED)
rmap_walk_locked(&folio->page, &rwc);
rmap_walk_locked(folio, &rwc);
else
rmap_walk(&folio->page, &rwc);
rmap_walk(folio, &rwc);
}
#ifdef CONFIG_DEVICE_PRIVATE
@@ -1990,10 +1986,9 @@ struct make_exclusive_args {
bool valid;
};
static bool page_make_device_exclusive_one(struct page *page,
static bool page_make_device_exclusive_one(struct folio *folio,
struct vm_area_struct *vma, unsigned long address, void *priv)
{
struct folio *folio = page_folio(page);
struct mm_struct *mm = vma->vm_mm;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
struct make_exclusive_args *args = priv;
@@ -2098,7 +2093,7 @@ static bool folio_make_device_exclusive(struct folio *folio,
struct rmap_walk_control rwc = {
.rmap_one = page_make_device_exclusive_one,
.done = page_not_mapped,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
.arg = &args,
};
@@ -2109,7 +2104,7 @@ static bool folio_make_device_exclusive(struct folio *folio,
if (!folio_test_anon(folio))
return false;
rmap_walk(&folio->page, &rwc);
rmap_walk(folio, &rwc);
return args.valid && !folio_mapcount(folio);
}
@@ -2177,17 +2172,16 @@ void __put_anon_vma(struct anon_vma *anon_vma)
anon_vma_free(root);
}
static struct anon_vma *rmap_walk_anon_lock(struct page *page,
static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
struct rmap_walk_control *rwc)
{
struct folio *folio = page_folio(page);
struct anon_vma *anon_vma;
if (rwc->anon_lock)
return rwc->anon_lock(page);
return rwc->anon_lock(folio);
/*
* Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
* Note: remove_migration_ptes() cannot use folio_lock_anon_vma_read()
* because that depends on page_mapped(); but not all its usages
* are holding mmap_lock. Users without mmap_lock are required to
* take a reference count to prevent the anon_vma disappearing
@@ -2209,10 +2203,9 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the anon_vma struct it points to.
*/
static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc,
bool locked)
{
struct folio *folio = page_folio(page);
struct anon_vma *anon_vma;
pgoff_t pgoff_start, pgoff_end;
struct anon_vma_chain *avc;
@@ -2222,17 +2215,17 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
/* anon_vma disappear under us? */
VM_BUG_ON_FOLIO(!anon_vma, folio);
} else {
anon_vma = rmap_walk_anon_lock(page, rwc);
anon_vma = rmap_walk_anon_lock(folio, rwc);
}
if (!anon_vma)
return;
pgoff_start = page_to_pgoff(page);
pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
pgoff_start = folio_pgoff(folio);
pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
pgoff_start, pgoff_end) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
unsigned long address = vma_address(&folio->page, vma);
VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
@@ -2240,9 +2233,9 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
if (!rwc->rmap_one(page, vma, address, rwc->arg))
if (!rwc->rmap_one(folio, vma, address, rwc->arg))
break;
if (rwc->done && rwc->done(page))
if (rwc->done && rwc->done(folio))
break;
}
@@ -2258,10 +2251,10 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
* Find all the mappings of a page using the mapping pointer and the vma chains
* contained in the address_space struct it points to.
*/
static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc,
bool locked)
{
struct address_space *mapping = page_mapping(page);
struct address_space *mapping = folio_mapping(folio);
pgoff_t pgoff_start, pgoff_end;
struct vm_area_struct *vma;
@@ -2271,18 +2264,18 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
* structure at mapping cannot be freed and reused yet,
* so we can safely take mapping->i_mmap_rwsem.
*/
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
if (!mapping)
return;
pgoff_start = page_to_pgoff(page);
pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
pgoff_start = folio_pgoff(folio);
pgoff_end = pgoff_start + folio_nr_pages(folio) - 1;
if (!locked)
i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap,
pgoff_start, pgoff_end) {
unsigned long address = vma_address(page, vma);
unsigned long address = vma_address(&folio->page, vma);
VM_BUG_ON_VMA(address == -EFAULT, vma);
cond_resched();
@@ -2290,9 +2283,9 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
if (!rwc->rmap_one(page, vma, address, rwc->arg))
if (!rwc->rmap_one(folio, vma, address, rwc->arg))
goto done;
if (rwc->done && rwc->done(page))
if (rwc->done && rwc->done(folio))
goto done;
}
@@ -2301,25 +2294,25 @@ done:
i_mmap_unlock_read(mapping);
}
void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
{
if (unlikely(PageKsm(page)))
rmap_walk_ksm(page, rwc);
else if (PageAnon(page))
rmap_walk_anon(page, rwc, false);
if (unlikely(folio_test_ksm(folio)))
rmap_walk_ksm(folio, rwc);
else if (folio_test_anon(folio))
rmap_walk_anon(folio, rwc, false);
else
rmap_walk_file(page, rwc, false);
rmap_walk_file(folio, rwc, false);
}
/* Like rmap_walk, but caller holds relevant rmap lock */
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
{
/* no ksm support for now */
VM_BUG_ON_PAGE(PageKsm(page), page);
if (PageAnon(page))
rmap_walk_anon(page, rwc, true);
VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
if (folio_test_anon(folio))
rmap_walk_anon(folio, rwc, true);
else
rmap_walk_file(page, rwc, true);
rmap_walk_file(folio, rwc, true);
}
#ifdef CONFIG_HUGETLB_PAGE