mm: inline destroy_large_folio() into __folio_put_large()

destroy_large_folio() has only one caller, move its contents there.

Link: https://lkml.kernel.org/r/20240405153228.2563754-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Matthew Wilcox (Oracle) 2024-04-05 16:32:25 +01:00 committed by Andrew Morton
parent 5b8d75913a
commit 2542b1ac9a
3 changed files with 10 additions and 19 deletions

View File

@@ -1318,8 +1318,6 @@ void folio_copy(struct folio *dst, struct folio *src);
unsigned long nr_free_buffer_pages(void);
void destroy_large_folio(struct folio *folio);
/* Returns the number of bytes in this potentially compound page. */
static inline unsigned long page_size(struct page *page)
{

View File

@@ -565,20 +565,6 @@ void prep_compound_page(struct page *page, unsigned int order)
prep_compound_head(page, order);
}
/*
 * Free a large (compound) folio.  hugetlb folios take their own free
 * path; all other large folios undo any large-rmappable state,
 * uncharge the memcg, then release the underlying pages.
 */
void destroy_large_folio(struct folio *folio)
{
/* hugetlb has its own free path (and a separate hugetlb_cgroup, so no memcg uncharge) */
if (folio_test_hugetlb(folio)) {
free_huge_folio(folio);
return;
}
/* tear down large-rmappable bookkeeping before the pages go back to the allocator */
if (folio_test_large_rmappable(folio))
folio_undo_large_rmappable(folio);
/* uncharge memcg first, then free all folio_order(folio) pages at once */
mem_cgroup_uncharge(folio);
free_unref_page(&folio->page, folio_order(folio));
}
static inline void set_buddy_order(struct page *page, unsigned int order)
{
set_page_private(page, order);

View File

@@ -127,9 +127,16 @@ static void __folio_put_large(struct folio *folio)
* (it's never listed to any LRU lists) and no memcg routines should
* be called for hugetlb (it has a separate hugetlb_cgroup.)
*/
if (!folio_test_hugetlb(folio))
page_cache_release(folio);
destroy_large_folio(folio);
if (folio_test_hugetlb(folio)) {
free_huge_folio(folio);
return;
}
page_cache_release(folio);
if (folio_test_large_rmappable(folio))
folio_undo_large_rmappable(folio);
mem_cgroup_uncharge(folio);
free_unref_page(&folio->page, folio_order(folio));
}
void __folio_put(struct folio *folio)