linux-stable/mm/folio-compat.c
Hugh Dickins cb67f4282b mm,thp,rmap: simplify compound page mapcount handling
Compound page (folio) mapcount calculations have been different for anon
and file (or shmem) THPs, and involved the obscure PageDoubleMap flag. 
And each huge mapping and unmapping of a file (or shmem) THP involved
atomically incrementing and decrementing the mapcount of every subpage of
that huge page, dirtying many struct page cachelines.

Add subpages_mapcount field to the struct folio and first tail page, so
that the total of subpage mapcounts is available in one place near the
head: then page_mapcount() and total_mapcount() and page_mapped(), and
their folio equivalents, are so quick that anon and file and hugetlb don't
need to be optimized differently.  Delete the unloved PageDoubleMap.
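
(An illustrative sketch, not kernel code: mock types approximating the new layout,
showing why the total mapcount becomes two reads near the head instead of a walk
over every subpage.  Field names follow the patch; the kernel's offset-by-one
storage of the real counters and its atomic_t accessors are simplified away.)

	#include <stdatomic.h>

	/* Mock of the counters kept in the first tail page after this patch. */
	struct mock_head {
		atomic_int compound_mapcount;	/* pmd mappings of the whole huge page */
		atomic_int subpages_mapcount;	/* sum of pte mappings of all subpages */
	};

	/* Total mapcount without touching any tail struct page cacheline. */
	static int mock_total_mapcount(struct mock_head *h)
	{
		return atomic_load(&h->compound_mapcount) +
		       atomic_load(&h->subpages_mapcount);
	}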

page_add and page_remove rmap functions must now maintain the
subpages_mapcount as well as the subpage _mapcount, when dealing with pte
mappings of huge pages; and correct maintenance of NR_ANON_MAPPED and
NR_FILE_MAPPED statistics still needs reading through the subpages, using
nr_subpages_unmapped() - but only when first or last pmd mapping finds
subpages_mapcount raised (double-map case, not the common case).
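
(Continuing the mock above with a hypothetical helper - not the real
page_add_anon_rmap()/page_add_file_rmap() - to show the dual maintenance
on the pte path.)

	/* Mapping one subpage by pte now bumps both the subpage's own count
	 * and the head's subpages_mapcount, keeping the total near the head. */
	static void mock_add_pte_mapping(struct mock_head *h,
					 atomic_int *subpage_mapcount)
	{
		atomic_fetch_add(subpage_mapcount, 1);		/* per-subpage _mapcount */
		atomic_fetch_add(&h->subpages_mapcount, 1);	/* running total near the head */
	}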

But are those counts (used to decide when to split an anon THP, and in
vmscan's pagecache_reclaimable heuristic) correctly maintained?  Not
quite: since page_remove_rmap() (and also split_huge_pmd()) is often
called without the page lock, there can be races when a subpage pte mapcount
goes 0<->1 while a compound pmd mapcount 0<->1 transition is scanning the
subpages - races which the
previous implementation had prevented.  The statistics might become
inaccurate, and even drift down until they underflow through 0.  That is
not good enough, but is better dealt with in a followup patch.

Update a few comments on first and second tail page overlaid fields. 
hugepage_add_new_anon_rmap() has to "increment" compound_mapcount, but
subpages_mapcount and compound_pincount are already correctly at 0, so
delete its reinitialization of compound_pincount.

A simple 100 X munmap(mmap(2GB, MAP_SHARED|MAP_POPULATE, tmpfs), 2GB) took
18 seconds on small pages, and used to take 1 second on huge pages, but
now takes 119 milliseconds on huge pages.  Mapping by pmds a second time
used to take 860ms and now takes 92ms; mapping by pmds after mapping by
ptes (when the scan is needed) used to take 870ms and now takes 495ms. 
But there might be some benchmarks which would show a slowdown, because
tail struct pages now fall out of cache until final freeing checks them.
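
(A sketch of that microbenchmark, under assumptions the commit does not spell
out: MAP_ANONYMOUS|MAP_SHARED, which is shmem/tmpfs-backed on Linux, stands in
for an open tmpfs file, and THP must be enabled for the mapping for the
huge-page numbers to apply.)

	#include <stdlib.h>
	#include <sys/mman.h>

	#define SIZE	(2UL << 30)	/* 2GB */

	int main(void)
	{
		for (int i = 0; i < 100; i++) {
			void *p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
				       MAP_SHARED | MAP_ANONYMOUS | MAP_POPULATE,
				       -1, 0);
			if (p == MAP_FAILED)
				return 1;
			munmap(p, SIZE);
		}
		return 0;
	}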

Link: https://lkml.kernel.org/r/47ad693-717-79c8-e1ba-46c3a6602e48@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: James Houghton <jthoughton@google.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mina Almasry <almasrymina@google.com>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Naoya Horiguchi <naoya.horiguchi@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zach O'Keefe <zokeefe@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2022-11-30 15:58:46 -08:00

/*
 * Compatibility functions which bloat the callers too much to make inline.
 * All of the callers of these functions should be converted to use folios
 * eventually.
 */

#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include "internal.h"

struct address_space *page_mapping(struct page *page)
{
	return folio_mapping(page_folio(page));
}
EXPORT_SYMBOL(page_mapping);

void unlock_page(struct page *page)
{
	return folio_unlock(page_folio(page));
}
EXPORT_SYMBOL(unlock_page);

void end_page_writeback(struct page *page)
{
	return folio_end_writeback(page_folio(page));
}
EXPORT_SYMBOL(end_page_writeback);

void wait_on_page_writeback(struct page *page)
{
	return folio_wait_writeback(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_on_page_writeback);

void wait_for_stable_page(struct page *page)
{
	return folio_wait_stable(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_for_stable_page);

void mark_page_accessed(struct page *page)
{
	folio_mark_accessed(page_folio(page));
}
EXPORT_SYMBOL(mark_page_accessed);

bool set_page_writeback(struct page *page)
{
	return folio_start_writeback(page_folio(page));
}
EXPORT_SYMBOL(set_page_writeback);

bool set_page_dirty(struct page *page)
{
	return folio_mark_dirty(page_folio(page));
}
EXPORT_SYMBOL(set_page_dirty);

int __set_page_dirty_nobuffers(struct page *page)
{
	return filemap_dirty_folio(page_mapping(page), page_folio(page));
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

bool clear_page_dirty_for_io(struct page *page)
{
	return folio_clear_dirty_for_io(page_folio(page));
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

bool redirty_page_for_writepage(struct writeback_control *wbc,
		struct page *page)
{
	return folio_redirty_for_writepage(wbc, page_folio(page));
}
EXPORT_SYMBOL(redirty_page_for_writepage);

void lru_cache_add(struct page *page)
{
	folio_add_lru(page_folio(page));
}
EXPORT_SYMBOL(lru_cache_add);

void lru_cache_add_inactive_or_unevictable(struct page *page,
		struct vm_area_struct *vma)
{
	folio_add_lru_vma(page_folio(page), vma);
}

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
	return filemap_add_folio(mapping, page_folio(page), index, gfp);
}
EXPORT_SYMBOL(add_to_page_cache_lru);

noinline
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
		int fgp_flags, gfp_t gfp)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
	if (!folio || xa_is_value(folio))
		return &folio->page;
	return folio_file_page(folio, index);
}
EXPORT_SYMBOL(pagecache_get_page);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
		pgoff_t index)
{
	unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;

	return pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

int try_to_release_page(struct page *page, gfp_t gfp)
{
	return filemap_release_folio(page_folio(page), gfp);
}
EXPORT_SYMBOL(try_to_release_page);

int isolate_lru_page(struct page *page)
{
	if (WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"))
		return -EBUSY;
	return folio_isolate_lru((struct folio *)page);
}

void putback_lru_page(struct page *page)
{
	folio_putback_lru(page_folio(page));
}