khugepaged: remove hpage from collapse_file()
Use new_folio throughout where we had been using hpage.

Link: https://lkml.kernel.org/r/20240403171838.1445826-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 610ff817b9
parent 8eca68e2cf
2 changed files with 42 additions and 41 deletions
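The most mechanical part of the conversion is the copy loop in collapse_file(): instead of recomputing the destination subpage as hpage + (index % HPAGE_PMD_NR) on every iteration, the new code takes a cursor from folio_page(new_folio, 0) and advances it in lockstep with index. A minimal userspace C sketch of that refactor, purely illustrative (NR_SUBPAGES, buf and the loop bodies are invented for the example; only the pattern mirrors the kernel change):

    #include <stdio.h>

    #define NR_SUBPAGES 4   /* stand-in for HPAGE_PMD_NR; illustrative only */

    int main(void)
    {
            int buf[NR_SUBPAGES];

            /* Old pattern: recompute the target from the base each time,
             * like clear_highpage(hpage + (index % HPAGE_PMD_NR)). */
            for (int index = 0; index < NR_SUBPAGES; index++)
                    buf[index % NR_SUBPAGES] = index;

            /* New pattern: keep a cursor and advance it alongside index,
             * like dst = folio_page(new_folio, 0); ... clear_highpage(dst); dst++. */
            int *dst = buf;
            for (int index = 0; index < NR_SUBPAGES; index++, dst++)
                    *dst = index;

            for (int i = 0; i < NR_SUBPAGES; i++)
                    printf("%d\n", buf[i]);
            return 0;
    }

Both loops fill the same slots; the cursor form just drops the per-iteration modular arithmetic, which is what the dst/dst++ hunks below do against the pages of new_folio.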
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -207,10 +207,10 @@ TRACE_EVENT(mm_khugepaged_scan_file,
 );
 
 TRACE_EVENT(mm_khugepaged_collapse_file,
-	TP_PROTO(struct mm_struct *mm, struct page *hpage, pgoff_t index,
+	TP_PROTO(struct mm_struct *mm, struct folio *new_folio, pgoff_t index,
 			bool is_shmem, unsigned long addr, struct file *file,
 			int nr, int result),
-	TP_ARGS(mm, hpage, index, addr, is_shmem, file, nr, result),
+	TP_ARGS(mm, new_folio, index, addr, is_shmem, file, nr, result),
 	TP_STRUCT__entry(
 		__field(struct mm_struct *, mm)
 		__field(unsigned long, hpfn)
@@ -224,7 +224,7 @@ TRACE_EVENT(mm_khugepaged_collapse_file,
 
 	TP_fast_assign(
 		__entry->mm = mm;
-		__entry->hpfn = hpage ? page_to_pfn(hpage) : -1;
+		__entry->hpfn = new_folio ? folio_pfn(new_folio) : -1;
 		__entry->index = index;
 		__entry->addr = addr;
 		__entry->is_shmem = is_shmem;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1780,30 +1780,27 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 			 struct collapse_control *cc)
 {
 	struct address_space *mapping = file->f_mapping;
-	struct page *hpage;
 	struct page *page;
-	struct page *tmp;
+	struct page *tmp, *dst;
 	struct folio *folio, *new_folio;
 	pgoff_t index = 0, end = start + HPAGE_PMD_NR;
 	LIST_HEAD(pagelist);
 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
 	int nr_none = 0, result = SCAN_SUCCEED;
 	bool is_shmem = shmem_file(file);
-	int nr = 0;
 
 	VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
 	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
 	result = alloc_charge_folio(&new_folio, mm, cc);
-	hpage = &new_folio->page;
 	if (result != SCAN_SUCCEED)
 		goto out;
 
-	__SetPageLocked(hpage);
+	__folio_set_locked(new_folio);
 	if (is_shmem)
-		__SetPageSwapBacked(hpage);
-	hpage->index = start;
-	hpage->mapping = mapping;
+		__folio_set_swapbacked(new_folio);
+	new_folio->index = start;
+	new_folio->mapping = mapping;
 
 	/*
 	 * Ensure we have slots for all the pages in the range.  This is
@@ -2036,20 +2033,24 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	 * The old pages are locked, so they won't change anymore.
 	 */
 	index = start;
+	dst = folio_page(new_folio, 0);
 	list_for_each_entry(page, &pagelist, lru) {
 		while (index < page->index) {
-			clear_highpage(hpage + (index % HPAGE_PMD_NR));
+			clear_highpage(dst);
 			index++;
+			dst++;
 		}
-		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
+		if (copy_mc_highpage(dst, page) > 0) {
 			result = SCAN_COPY_MC;
 			goto rollback;
 		}
 		index++;
+		dst++;
 	}
 	while (index < end) {
-		clear_highpage(hpage + (index % HPAGE_PMD_NR));
+		clear_highpage(dst);
 		index++;
+		dst++;
 	}
 
 	if (nr_none) {
@@ -2077,16 +2078,17 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		}
 
 		/*
-		 * If userspace observed a missing page in a VMA with a MODE_MISSING
-		 * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that
-		 * page. If so, we need to roll back to avoid suppressing such an
-		 * event. Since wp/minor userfaultfds don't give userspace any
-		 * guarantees that the kernel doesn't fill a missing page with a zero
-		 * page, so they don't matter here.
+		 * If userspace observed a missing page in a VMA with
+		 * a MODE_MISSING userfaultfd, then it might expect a
+		 * UFFD_EVENT_PAGEFAULT for that page. If so, we need to
+		 * roll back to avoid suppressing such an event. Since
+		 * wp/minor userfaultfds don't give userspace any
+		 * guarantees that the kernel doesn't fill a missing
+		 * page with a zero page, so they don't matter here.
 		 *
-		 * Any userfaultfds registered after this point will not be able to
-		 * observe any missing pages due to the previously inserted retry
-		 * entries.
+		 * Any userfaultfds registered after this point will
+		 * not be able to observe any missing pages due to the
+		 * previously inserted retry entries.
 		 */
 		vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
 			if (userfaultfd_missing(vma)) {
@@ -2111,33 +2113,32 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		xas_lock_irq(&xas);
 	}
 
-	folio = page_folio(hpage);
-	nr = folio_nr_pages(folio);
 	if (is_shmem)
-		__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr);
+		__lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
 	else
-		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, nr);
+		__lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
 
 	if (nr_none) {
-		__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_none);
+		__lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
 		/* nr_none is always 0 for non-shmem. */
-		__lruvec_stat_mod_folio(folio, NR_SHMEM, nr_none);
+		__lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
 	}
 
 	/*
-	 * Mark hpage as uptodate before inserting it into the page cache so
-	 * that it isn't mistaken for an fallocated but unwritten page.
+	 * Mark new_folio as uptodate before inserting it into the
+	 * page cache so that it isn't mistaken for an fallocated but
+	 * unwritten page.
 	 */
-	folio_mark_uptodate(folio);
-	folio_ref_add(folio, HPAGE_PMD_NR - 1);
+	folio_mark_uptodate(new_folio);
+	folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
 
 	if (is_shmem)
-		folio_mark_dirty(folio);
-	folio_add_lru(folio);
+		folio_mark_dirty(new_folio);
+	folio_add_lru(new_folio);
 
 	/* Join all the small entries into a single multi-index entry. */
 	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
-	xas_store(&xas, folio);
+	xas_store(&xas, new_folio);
 	WARN_ON_ONCE(xas_error(&xas));
 	xas_unlock_irq(&xas);
 
@@ -2148,7 +2149,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 	retract_page_tables(mapping, start);
 	if (cc && !cc->is_khugepaged)
 		result = SCAN_PTE_MAPPED_HUGEPAGE;
-	folio_unlock(folio);
+	folio_unlock(new_folio);
 
 	/*
 	 * The collapse has succeeded, so free the old pages.
@@ -2193,13 +2194,13 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
 		smp_mb();
 	}
 
-	hpage->mapping = NULL;
+	new_folio->mapping = NULL;
 
-	unlock_page(hpage);
-	put_page(hpage);
+	folio_unlock(new_folio);
+	folio_put(new_folio);
 out:
 	VM_BUG_ON(!list_empty(&pagelist));
-	trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
+	trace_mm_khugepaged_collapse_file(mm, new_folio, index, is_shmem, addr, file, HPAGE_PMD_NR, result);
 	return result;
 }
 