mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-26 04:16:39 +00:00
mm: remove folio_prep_large_rmappable()
Now that prep_compound_page() initialises folio->_deferred_list, folio_prep_large_rmappable()'s only purpose is to set the large_rmappable flag, so inline it into the two callers. Take the opportunity to convert the large_rmappable definition from PAGEFLAG to FOLIO_FLAG and remove the existence of PageTestLargeRmappable and friends. Link: https://lkml.kernel.org/r/20240321142448.1645400-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: David Hildenbrand <david@redhat.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: Muchun Song <muchun.song@linux.dev> Cc: Oscar Salvador <osalvador@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
b7b098cf00
commit
85edc15a4c
4 changed files with 5 additions and 14 deletions
|
@ -263,7 +263,6 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
|
||||||
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
|
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
|
||||||
unsigned long len, unsigned long pgoff, unsigned long flags);
|
unsigned long len, unsigned long pgoff, unsigned long flags);
|
||||||
|
|
||||||
void folio_prep_large_rmappable(struct folio *folio);
|
|
||||||
bool can_split_folio(struct folio *folio, int *pextra_pins);
|
bool can_split_folio(struct folio *folio, int *pextra_pins);
|
||||||
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
|
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
|
||||||
unsigned int new_order);
|
unsigned int new_order);
|
||||||
|
@ -411,8 +410,6 @@ static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void folio_prep_large_rmappable(struct folio *folio) {}
|
|
||||||
|
|
||||||
#define transparent_hugepage_flags 0UL
|
#define transparent_hugepage_flags 0UL
|
||||||
|
|
||||||
#define thp_get_unmapped_area NULL
|
#define thp_get_unmapped_area NULL
|
||||||
|
|
|
@ -868,9 +868,9 @@ static inline void ClearPageCompound(struct page *page)
|
||||||
BUG_ON(!PageHead(page));
|
BUG_ON(!PageHead(page));
|
||||||
ClearPageHead(page);
|
ClearPageHead(page);
|
||||||
}
|
}
|
||||||
PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND)
|
FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE)
|
||||||
#else
|
#else
|
||||||
TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
|
FOLIO_FLAG_FALSE(large_rmappable)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define PG_head_mask ((1UL << PG_head))
|
#define PG_head_mask ((1UL << PG_head))
|
||||||
|
|
|
@ -789,13 +789,6 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
void folio_prep_large_rmappable(struct folio *folio)
|
|
||||||
{
|
|
||||||
if (!folio || !folio_test_large(folio))
|
|
||||||
return;
|
|
||||||
folio_set_large_rmappable(folio);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline bool is_transparent_hugepage(struct folio *folio)
|
static inline bool is_transparent_hugepage(struct folio *folio)
|
||||||
{
|
{
|
||||||
if (!folio_test_large(folio))
|
if (!folio_test_large(folio))
|
||||||
|
@ -2862,7 +2855,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
|
||||||
clear_compound_head(page_tail);
|
clear_compound_head(page_tail);
|
||||||
if (new_order) {
|
if (new_order) {
|
||||||
prep_compound_page(page_tail, new_order);
|
prep_compound_page(page_tail, new_order);
|
||||||
folio_prep_large_rmappable(new_folio);
|
folio_set_large_rmappable(new_folio);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Finally unfreeze refcount. Additional reference from page cache. */
|
/* Finally unfreeze refcount. Additional reference from page cache. */
|
||||||
|
|
|
@ -513,7 +513,8 @@ static inline struct folio *page_rmappable_folio(struct page *page)
|
||||||
{
|
{
|
||||||
struct folio *folio = (struct folio *)page;
|
struct folio *folio = (struct folio *)page;
|
||||||
|
|
||||||
folio_prep_large_rmappable(folio);
|
if (folio && folio_test_large(folio))
|
||||||
|
folio_set_large_rmappable(folio);
|
||||||
return folio;
|
return folio;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue