mm/rmap: convert page_move_anon_rmap() to folio_move_anon_rmap()
Let's convert it to consume a folio.

[akpm@linux-foundation.org: fix kerneldoc]
Link: https://lkml.kernel.org/r/20231002142949.235104-3-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 069686255c
parent 5ca432896a
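For orientation, here is a minimal before/after sketch of the calling convention this patch introduces. It is an illustration only (the helper name reuse_anon_page() is made up and is not part of the patch); the real call sites in the hunks below already have the folio at hand.

```c
/* Hypothetical helper, not part of this patch, showing the conversion.
 * The caller must hold the folio lock, as folio_move_anon_rmap() asserts. */
static void reuse_anon_page(struct page *page, struct vm_area_struct *vma)
{
	struct folio *folio = page_folio(page);

	/* Before this patch: page_move_anon_rmap(page, vma); */
	folio_move_anon_rmap(folio, vma);
	SetPageAnonExclusive(page);
}
```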
@@ -189,7 +189,7 @@ typedef int __bitwise rmap_t;
 /*
  * rmap interfaces called when adding or removing pte of page
  */
-void page_move_anon_rmap(struct page *, struct vm_area_struct *);
+void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long address, rmap_t flags);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,

@@ -1376,7 +1376,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf)
 	if (folio_ref_count(folio) == 1) {
 		pmd_t entry;
 
-		page_move_anon_rmap(page, vma);
+		folio_move_anon_rmap(folio, vma);
 		SetPageAnonExclusive(page);
 		folio_unlock(folio);
 reuse:

@@ -5653,7 +5653,7 @@ retry_avoidcopy:
 	 */
 	if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
 		if (!PageAnonExclusive(&old_folio->page)) {
-			page_move_anon_rmap(&old_folio->page, vma);
+			folio_move_anon_rmap(old_folio, vma);
 			SetPageAnonExclusive(&old_folio->page);
 		}
 		if (likely(!unshare))

@@ -3480,7 +3480,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
 	 * and the folio is locked, it's dark out, and we're wearing
 	 * sunglasses. Hit it.
 	 */
-	page_move_anon_rmap(vmf->page, vma);
+	folio_move_anon_rmap(folio, vma);
 	SetPageAnonExclusive(vmf->page);
 	folio_unlock(folio);
 reuse:

 mm/rmap.c | 16 +++++++---------
@@ -1128,19 +1128,17 @@ int folio_total_mapcount(struct folio *folio)
 }
 
 /**
- * page_move_anon_rmap - move a page to our anon_vma
- * @page:	the page to move to our anon_vma
- * @vma:	the vma the page belongs to
+ * folio_move_anon_rmap - move a folio to our anon_vma
+ * @folio:	The folio to move to our anon_vma
+ * @vma:	The vma the folio belongs to
  *
- * When a page belongs exclusively to one process after a COW event,
- * that page can be moved into the anon_vma that belongs to just that
- * process, so the rmap code will not search the parent or sibling
- * processes.
+ * When a folio belongs exclusively to one process after a COW event,
+ * that folio can be moved into the anon_vma that belongs to just that
+ * process, so the rmap code will not search the parent or sibling processes.
  */
-void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
+void folio_move_anon_rmap(struct folio *folio, struct vm_area_struct *vma)
 {
 	void *anon_vma = vma->anon_vma;
-	struct folio *folio = page_folio(page);
 
 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
 	VM_BUG_ON_VMA(!anon_vma, vma);
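As a usage note, the COW-reuse pattern visible in the do_huge_pmd_wp_page() and do_wp_page() hunks above boils down to the sequence below (sketch only; the exclusivity and locking checks are exactly those shown in the hunks):

```c
/* Sketch of the caller pattern from the hunks above: once the folio is
 * known to be exclusively owned and is locked, rebind it to this VMA's
 * anon_vma and mark the reused subpage exclusive. */
if (folio_ref_count(folio) == 1) {
	folio_move_anon_rmap(folio, vma);
	SetPageAnonExclusive(page);
	folio_unlock(folio);
}
```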