mm: remove munlock_vma_page()

All callers now have a folio and can call munlock_vma_folio().  Update the
documentation to refer to munlock_vma_folio().
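
A minimal caller-side sketch (both calls are taken from the
page_remove_rmap() hunk below; the removed wrapper only did the
page_folio() conversion internally):

	/* before: the wrapper converted page to folio itself */
	munlock_vma_page(page, vma, compound);

	/* after: the caller passes the folio it already has */
	munlock_vma_folio(folio, vma, compound);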

Link: https://lkml.kernel.org/r/20230116192827.2146732-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

diff --git a/Documentation/mm/unevictable-lru.rst b/Documentation/mm/unevictable-lru.rst

@@ -486,7 +486,7 @@ Before the unevictable/mlock changes, mlocking did not mark the pages in any
 way, so unmapping them required no processing.
 
 For each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
-munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+munlock_vma_folio(), which calls munlock_folio() when the VMA is VM_LOCKED
 (unless it was a PTE mapping of a part of a transparent huge page).
 
 munlock_page() uses the mlock pagevec to batch up work to be done under
@@ -510,7 +510,7 @@ which had been Copied-On-Write from the file pages now being truncated.
 
 Mlocked pages can be munlocked and deleted in this way: like with munmap(),
 for each PTE (or PMD) being unmapped from a VMA, page_remove_rmap() calls
-munlock_vma_page(), which calls munlock_page() when the VMA is VM_LOCKED
+munlock_vma_folio(), which calls munlock_folio() when the VMA is VM_LOCKED
 (unless it was a PTE mapping of a part of a transparent huge page).
 
 However, if there is a racing munlock(), since mlock_vma_pages_range() starts
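
A sketch of the check described above, for illustration: the function
tail is visible in the mm/internal.h hunks below, but the exact guard
condition here is an assumption reconstructed from that context.

	static inline void munlock_vma_folio(struct folio *folio,
			struct vm_area_struct *vma, bool compound)
	{
		/*
		 * Munlock only when the VMA is VM_LOCKED, and skip a PTE
		 * mapping of part of a transparent huge page, i.e. a
		 * small mapping of a large folio.
		 */
		if (unlikely(vma->vm_flags & VM_LOCKED) &&
		    (compound || !folio_test_large(folio)))
			munlock_folio(folio);
	}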

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c

@@ -22,7 +22,6 @@
 #include <linux/swap.h>		/* folio_free_swap */
 #include <linux/ptrace.h>	/* user_enable_single_step */
 #include <linux/kdebug.h>	/* notifier mechanism */
-#include "../../mm/internal.h"	/* munlock_vma_page */
 #include <linux/percpu-rwsem.h>
 #include <linux/task_work.h>
 #include <linux/shmem_fs.h>

diff --git a/mm/internal.h b/mm/internal.h

@@ -548,7 +548,6 @@ static inline void mlock_vma_folio(struct folio *folio,
 }
 
 void munlock_folio(struct folio *folio);
-
 static inline void munlock_vma_folio(struct folio *folio,
 		struct vm_area_struct *vma, bool compound)
 {
@@ -557,11 +556,6 @@ static inline void munlock_vma_folio(struct folio *folio,
 		munlock_folio(folio);
 }
 
-static inline void munlock_vma_page(struct page *page,
-		struct vm_area_struct *vma, bool compound)
-{
-	munlock_vma_folio(page_folio(page), vma, compound);
-}
 void mlock_new_folio(struct folio *folio);
 bool need_mlock_drain(int cpu);
 void mlock_drain_local(void);
@@ -650,8 +644,6 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
 }
 #else /* !CONFIG_MMU */
 static inline void unmap_mapping_folio(struct folio *folio) { }
-static inline void munlock_vma_page(struct page *page,
-		struct vm_area_struct *vma, bool compound) { }
 static inline void mlock_new_folio(struct folio *folio) { }
 static inline bool need_mlock_drain(int cpu) { return false; }
 static inline void mlock_drain_local(void) { }
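
With the wrapper and its !CONFIG_MMU stub removed, a hypothetical caller
still holding a bare page would open-code the conversion the wrapper used
to perform:

	munlock_vma_folio(page_folio(page), vma, compound);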

diff --git a/mm/rmap.c b/mm/rmap.c

@@ -1431,14 +1431,14 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 	}
 
 	/*
-	 * It would be tidy to reset PageAnon mapping when fully unmapped,
-	 * but that might overwrite a racing page_add_anon_rmap
-	 * which increments mapcount after us but sets mapping
-	 * before us: so leave the reset to free_pages_prepare,
-	 * and remember that it's only reliable while mapped.
+	 * It would be tidy to reset folio_test_anon mapping when fully
+	 * unmapped, but that might overwrite a racing page_add_anon_rmap
+	 * which increments mapcount after us but sets mapping before us:
+	 * so leave the reset to free_pages_prepare, and remember that
+	 * it's only reliable while mapped.
 	 */
 
-	munlock_vma_page(page, vma, compound);
+	munlock_vma_folio(folio, vma, compound);
 }
 
 /*