mm/rmap: page_remove_rmap() -> folio_remove_rmap_pte()

Let's convert try_to_unmap_one() and try_to_migrate_one().

Link: https://lkml.kernel.org/r/20231220224504.646757-31-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author:    David Hildenbrand <david@redhat.com>
Date:      2023-12-20 23:44:54 +01:00
Committer: Andrew Morton <akpm@linux-foundation.org>
parent 5b205c7f26
commit ca1a074618

1 changed file with 5 additions and 5 deletions

mm/rmap.c

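As a quick orientation before the hunks: the conversion swaps the page-based removal call for the folio-based, PTE-granular one. A minimal before/after sketch (illustrative only; "folio", "subpage" and "vma" are the locals already in scope at each call site, and the semantics come from the folio_remove_rmap_*() helpers introduced earlier in this series):

	/* before: the trailing "false" meant "not a compound (PMD) mapping" */
	page_remove_rmap(subpage, vma, false);

	/* after: PTE granularity is explicit and the folio is passed directly */
	folio_remove_rmap_pte(folio, subpage, vma);
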
@@ -1647,7 +1647,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 	/*
 	 * When racing against e.g. zap_pte_range() on another cpu,
-	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
 	 * try_to_unmap() may return before page_mapped() has become false,
 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
 	 */
@@ -1928,7 +1928,7 @@ discard:
 		if (unlikely(folio_test_hugetlb(folio)))
 			hugetlb_remove_rmap(folio);
 		else
-			page_remove_rmap(subpage, vma, false);
+			folio_remove_rmap_pte(folio, subpage, vma);
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_drain_local();
 		folio_put(folio);
@@ -1996,7 +1996,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 	/*
 	 * When racing against e.g. zap_pte_range() on another cpu,
-	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
+	 * in between its ptep_get_and_clear_full() and folio_remove_rmap_*(),
 	 * try_to_migrate() may return before page_mapped() has become false,
 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
 	 */
@@ -2289,7 +2289,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 		if (unlikely(folio_test_hugetlb(folio)))
 			hugetlb_remove_rmap(folio);
 		else
-			page_remove_rmap(subpage, vma, false);
+			folio_remove_rmap_pte(folio, subpage, vma);
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_drain_local();
 		folio_put(folio);
@@ -2428,7 +2428,7 @@ static bool page_make_device_exclusive_one(struct folio *folio,
 		 * There is a reference on the page for the swap entry which has
 		 * been removed, so shouldn't take another.
 		 */
-		page_remove_rmap(subpage, vma, false);
+		folio_remove_rmap_pte(folio, subpage, vma);
 	}
 	mmu_notifier_invalidate_range_end(&range);
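
For reference, the PTE helper used above is expected to be a thin wrapper around the batched variant added earlier in this series (sketch of the include/linux/rmap.h interface, not part of this diff):

	/* remove nr_pages consecutive PTE mappings of pages belonging to folio */
	void folio_remove_rmap_ptes(struct folio *, struct page *, int nr_pages,
			struct vm_area_struct *);
	/* single-PTE case: exactly one mapping removed */
	#define folio_remove_rmap_pte(folio, page, vma) \
		folio_remove_rmap_ptes(folio, page, 1, vma)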