Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-09-28 21:33:52 +00:00)
mm/rmap: factor out adding folio mappings into __folio_add_rmap()
Let's factor it out to prepare for reuse as we convert page_add_anon_rmap()
to folio_add_anon_rmap_[pte|ptes|pmd]().

Make the compiler always special-case on the granularity by using
__always_inline.

Link: https://lkml.kernel.org/r/20231220224504.646757-14-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
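A note on the __always_inline point above: because every wrapper passes a compile-time constant rmap_level, inlining lets the compiler drop the non-matching switch arm at each call site. Below is a minimal user-space sketch of that pattern; the names (add_mappings, MY_LEVEL_*) are made up for illustration and are not the kernel API.

#include <stdio.h>

/* Hypothetical granularity levels, mirroring enum rmap_level in spirit. */
enum my_level { MY_LEVEL_PTE, MY_LEVEL_PMD };

/*
 * Shared helper: each caller passes a constant 'level', so after inlining
 * only the matching case survives in the generated code.
 */
static inline __attribute__((__always_inline__))
int add_mappings(int nr_pages, enum my_level level)
{
        switch (level) {
        case MY_LEVEL_PTE:
                return nr_pages;        /* account every small page */
        case MY_LEVEL_PMD:
                return 1;               /* one entire-folio mapping */
        }
        return 0;
}

/* Thin granularity-specific wrappers, analogous in shape to the planned
 * folio_add_anon_rmap_[pte|ptes|pmd]() conversions. */
static int add_mappings_ptes(int nr_pages) { return add_mappings(nr_pages, MY_LEVEL_PTE); }
static int add_mappings_pmd(void) { return add_mappings(1, MY_LEVEL_PMD); }

int main(void)
{
        printf("%d %d\n", add_mappings_ptes(4), add_mappings_pmd());   /* prints: 4 1 */
        return 0;
}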
parent be6e57cfab
commit 96fd74958c
1 changed file with 44 additions and 34 deletions
mm/rmap.c | 78
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1157,6 +1157,48 @@ int folio_total_mapcount(struct folio *folio)
         return mapcount;
 }
 
+static __always_inline unsigned int __folio_add_rmap(struct folio *folio,
+                struct page *page, int nr_pages, enum rmap_level level,
+                int *nr_pmdmapped)
+{
+        atomic_t *mapped = &folio->_nr_pages_mapped;
+        int first, nr = 0;
+
+        __folio_rmap_sanity_checks(folio, page, nr_pages, level);
+
+        switch (level) {
+        case RMAP_LEVEL_PTE:
+                do {
+                        first = atomic_inc_and_test(&page->_mapcount);
+                        if (first && folio_test_large(folio)) {
+                                first = atomic_inc_return_relaxed(mapped);
+                                first = (first < COMPOUND_MAPPED);
+                        }
+
+                        if (first)
+                                nr++;
+                } while (page++, --nr_pages > 0);
+                break;
+        case RMAP_LEVEL_PMD:
+                first = atomic_inc_and_test(&folio->_entire_mapcount);
+                if (first) {
+                        nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
+                        if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
+                                *nr_pmdmapped = folio_nr_pages(folio);
+                                nr = *nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
+                                /* Raced ahead of a remove and another add? */
+                                if (unlikely(nr < 0))
+                                        nr = 0;
+                        } else {
+                                /* Raced ahead of a remove of COMPOUND_MAPPED */
+                                nr = 0;
+                        }
+                }
+                break;
+        }
+        return nr;
+}
+
 /**
  * folio_move_anon_rmap - move a folio to our anon_vma
  * @folio:      The folio to move to our anon_vma
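The RMAP_LEVEL_PMD branch of the new helper above packs two things into folio->_nr_pages_mapped: the low bits (masked with FOLIO_PAGES_MAPPED) count individually PTE-mapped pages, while adding COMPOUND_MAPPED records an entire-folio mapping; the two checks then catch racing removals. A small stand-alone model of that arithmetic, using illustrative constants and a plain int rather than the kernel's definitions and atomics:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's COMPOUND_MAPPED / FOLIO_PAGES_MAPPED. */
#define MODEL_COMPOUND_MAPPED   0x800000
#define MODEL_PAGES_MAPPED      (MODEL_COMPOUND_MAPPED - 1)

int main(void)
{
        int nr_folio_pages = 512;       /* pretend PMD-sized folio */
        int mapped = 3;                 /* 3 pages already PTE-mapped elsewhere */
        int nr_pmdmapped = 0, nr = 0;

        /* First entire mapping: add COMPOUND_MAPPED and read the new value. */
        mapped += MODEL_COMPOUND_MAPPED;

        if (mapped < MODEL_COMPOUND_MAPPED + MODEL_COMPOUND_MAPPED) {
                nr_pmdmapped = nr_folio_pages;
                /* Newly mapped pages = folio size minus pages already
                 * accounted through their individual PTE mappings. */
                nr = nr_pmdmapped - (mapped & MODEL_PAGES_MAPPED);
                if (nr < 0)     /* raced ahead of a remove and another add */
                        nr = 0;
        }
        printf("nr=%d nr_pmdmapped=%d\n", nr, nr_pmdmapped);   /* nr=509 nr_pmdmapped=512 */
        return 0;
}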
@@ -1382,43 +1424,11 @@ static __always_inline void __folio_add_file_rmap(struct folio *folio,
                 struct page *page, int nr_pages, struct vm_area_struct *vma,
                 enum rmap_level level)
 {
-        atomic_t *mapped = &folio->_nr_pages_mapped;
-        int nr = 0, nr_pmdmapped = 0, first;
+        int nr, nr_pmdmapped = 0;
 
         VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
-        __folio_rmap_sanity_checks(folio, page, nr_pages, level);
-
-        switch (level) {
-        case RMAP_LEVEL_PTE:
-                do {
-                        first = atomic_inc_and_test(&page->_mapcount);
-                        if (first && folio_test_large(folio)) {
-                                first = atomic_inc_return_relaxed(mapped);
-                                first = (first < COMPOUND_MAPPED);
-                        }
-
-                        if (first)
-                                nr++;
-                } while (page++, --nr_pages > 0);
-                break;
-        case RMAP_LEVEL_PMD:
-                first = atomic_inc_and_test(&folio->_entire_mapcount);
-                if (first) {
-                        nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
-                        if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
-                                nr_pmdmapped = folio_nr_pages(folio);
-                                nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
-                                /* Raced ahead of a remove and another add? */
-                                if (unlikely(nr < 0))
-                                        nr = 0;
-                        } else {
-                                /* Raced ahead of a remove of COMPOUND_MAPPED */
-                                nr = 0;
-                        }
-                }
-                break;
-        }
+        nr = __folio_add_rmap(folio, page, nr_pages, level, &nr_pmdmapped);
 
         if (nr_pmdmapped)
                 __lruvec_stat_mod_folio(folio, folio_test_swapbacked(folio) ?
                         NR_SHMEM_PMDMAPPED : NR_FILE_PMDMAPPED, nr_pmdmapped);
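For the RMAP_LEVEL_PTE path, only 0 -> 1 transitions of a subpage's _mapcount contribute to nr (the large-folio _nr_pages_mapped handling is omitted here). A stand-alone C11 sketch of that per-page loop, with an atomic_int array standing in for page->_mapcount:

#include <stdatomic.h>
#include <stdio.h>

#define NR_PAGES 4

int main(void)
{
        /* Per-page mapcounts start at -1, like page->_mapcount in the kernel;
         * page 2 is already mapped once somewhere else. */
        atomic_int mapcount[NR_PAGES] = { -1, -1, 0, -1 };
        int nr = 0;

        for (int i = 0; i < NR_PAGES; i++) {
                /* Mimic atomic_inc_and_test(): increment, then check whether
                 * the new value is zero, i.e. this was the first mapping. */
                int first = (atomic_fetch_add(&mapcount[i], 1) + 1 == 0);
                if (first)
                        nr++;
        }
        printf("newly mapped pages: %d\n", nr); /* 3 of the 4 were first-time mappings */
        return 0;
}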