mm: convert should_zap_page() to should_zap_folio()

Make should_zap_page() take a folio and rename it to should_zap_folio() as
preparation for converting mm counter functions to take a folio.  Saves a
call to compound_head() hidden inside PageAnon().
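
For context, PageAnon() resolves the head page before testing the flag; roughly (a paraphrased sketch of the include/linux/page-flags.h helpers, not the verbatim kernel source):

	static __always_inline bool folio_test_anon(struct folio *folio)
	{
		/* anonymous folios are tagged in the low bit of ->mapping */
		return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
	}

	static __always_inline bool PageAnon(struct page *page)
	{
		/* page_folio() goes through compound_head() to find the folio */
		return folio_test_anon(page_folio(page));
	}

A caller that already holds a folio can call folio_test_anon() directly and skip the compound_head() lookup, which is the saving this patch exploits.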

[wangkefeng.wang@huawei.com: fix used-uninitialized warning]
  Link: https://lkml.kernel.org/r/962a7993-fce9-4de8-85cd-25e290f25736@huawei.com
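The warning fixed above arises because vm_normal_page() can return NULL (e.g. for the zero page), so the folio pointer must be initialised before should_zap_folio() consumes it. A condensed sketch of the resulting caller pattern, assembled from the zap_pte_range() hunks below with comments added:

	struct folio *folio = NULL;	/* must start NULL: not every pte has a page */
	struct page *page;

	page = vm_normal_page(vma, addr, ptent);
	if (page)
		folio = page_folio(page);

	/* a NULL folio means "zap it", e.g. for the zero page */
	if (unlikely(!should_zap_folio(details, folio)))
		continue;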
Link: https://lkml.kernel.org/r/20240111152429.3374566-9-willy@infradead.org
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/memory.c | 31 +++++++++++++++++--------------
 1 file changed, 17 insertions(+), 14 deletions(-)


@@ -1369,19 +1369,20 @@ static inline bool should_zap_cows(struct zap_details *details)
 	return details->even_cows;
 }
 
-/* Decides whether we should zap this page with the page pointer specified */
-static inline bool should_zap_page(struct zap_details *details, struct page *page)
+/* Decides whether we should zap this folio with the folio pointer specified */
+static inline bool should_zap_folio(struct zap_details *details,
+				    struct folio *folio)
 {
-	/* If we can make a decision without *page.. */
+	/* If we can make a decision without *folio.. */
 	if (should_zap_cows(details))
 		return true;
 
-	/* E.g. the caller passes NULL for the case of a zero page */
-	if (!page)
+	/* E.g. the caller passes NULL for the case of a zero folio */
+	if (!folio)
 		return true;
 
-	/* Otherwise we should only zap non-anon pages */
-	return !PageAnon(page);
+	/* Otherwise we should only zap non-anon folios */
+	return !folio_test_anon(folio);
 }
 
 static inline bool zap_drop_file_uffd_wp(struct zap_details *details)
@@ -1434,7 +1435,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	arch_enter_lazy_mmu_mode();
 	do {
 		pte_t ptent = ptep_get(pte);
-		struct folio *folio;
+		struct folio *folio = NULL;
 		struct page *page;
 
 		if (pte_none(ptent))
@@ -1447,7 +1448,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			unsigned int delay_rmap;
 
 			page = vm_normal_page(vma, addr, ptent);
-			if (unlikely(!should_zap_page(details, page)))
+			if (page)
+				folio = page_folio(page);
+
+			if (unlikely(!should_zap_folio(details, folio)))
 				continue;
 			ptent = ptep_get_and_clear_full(mm, addr, pte,
 							tlb->fullmm);
@@ -1460,7 +1464,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 				continue;
 			}
 
-			folio = page_folio(page);
 			delay_rmap = 0;
 			if (!folio_test_anon(folio)) {
 				if (pte_dirty(ptent)) {
@@ -1492,7 +1495,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 		    is_device_exclusive_entry(entry)) {
 			page = pfn_swap_entry_to_page(entry);
 			folio = page_folio(page);
-			if (unlikely(!should_zap_page(details, page)))
+			if (unlikely(!should_zap_folio(details, folio)))
 				continue;
 			/*
 			 * Both device private/exclusive mappings should only
@@ -1513,10 +1516,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 			if (unlikely(!free_swap_and_cache(entry)))
 				print_bad_pte(vma, addr, ptent, NULL);
 		} else if (is_migration_entry(entry)) {
-			page = pfn_swap_entry_to_page(entry);
-			if (!should_zap_page(details, page))
+			folio = pfn_swap_entry_folio(entry);
+			if (!should_zap_folio(details, folio))
 				continue;
-			rss[mm_counter(page)]--;
+			rss[mm_counter(&folio->page)]--;
 		} else if (pte_marker_entry_uffd_wp(entry)) {
 			/*
 			 * For anon: always drop the marker; for file: only