mm: convert free_zone_device_page to free_zone_device_folio

Both callers already have a folio; pass it in and save a few calls to
compound_head().

Link: https://lkml.kernel.org/r/20240405153228.2563754-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
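
For context, a minimal sketch of the round-trip the old signature forced. The fragments are illustrative, not literal kernel code, but both call shapes are taken from the diff below; page_folio() is a typed wrapper around compound_head():

	/* Before: the caller demotes its folio to a page ... */
	free_zone_device_page(&folio->page);
	/* ... and the callee immediately recomputes the folio,
	 * via page_folio(), i.e. compound_head(): */
	mem_cgroup_uncharge(page_folio(page));

	/* After: the folio is passed straight through; no head lookup. */
	free_zone_device_folio(folio);
	mem_cgroup_uncharge(folio);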

--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1165,7 +1165,7 @@ void __vunmap_range_noflush(unsigned long start, unsigned long end);
 int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
		      unsigned long addr, int page_nid, int *flags);
-void free_zone_device_page(struct page *page);
+void free_zone_device_folio(struct folio *folio);
 int migrate_device_coherent_page(struct page *page);
 
 /*

--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -456,21 +456,23 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 }
 EXPORT_SYMBOL_GPL(get_dev_pagemap);
 
-void free_zone_device_page(struct page *page)
+void free_zone_device_folio(struct folio *folio)
 {
-	if (WARN_ON_ONCE(!page->pgmap->ops || !page->pgmap->ops->page_free))
+	if (WARN_ON_ONCE(!folio->page.pgmap->ops ||
+			 !folio->page.pgmap->ops->page_free))
 		return;
 
-	mem_cgroup_uncharge(page_folio(page));
+	mem_cgroup_uncharge(folio);
 
 	/*
 	 * Note: we don't expect anonymous compound pages yet. Once supported
	 * and we could PTE-map them similar to THP, we'd have to clear
	 * PG_anon_exclusive on all tail pages.
	 */
-	VM_BUG_ON_PAGE(PageAnon(page) && PageCompound(page), page);
-	if (PageAnon(page))
-		__ClearPageAnonExclusive(page);
+	if (folio_test_anon(folio)) {
+		VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
+		__ClearPageAnonExclusive(folio_page(folio, 0));
+	}
 
	/*
	 * When a device managed page is freed, the folio->mapping field
@@ -481,20 +483,20 @@ void free_zone_device_page(struct page *page)
	 *
	 * For other types of ZONE_DEVICE pages, migration is either
	 * handled differently or not done at all, so there is no need
-	 * to clear page->mapping.
+	 * to clear folio->mapping.
	 */
-	page->mapping = NULL;
-	page->pgmap->ops->page_free(page);
+	folio->mapping = NULL;
+	folio->page.pgmap->ops->page_free(folio_page(folio, 0));
 
-	if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
-	    page->pgmap->type != MEMORY_DEVICE_COHERENT)
+	if (folio->page.pgmap->type != MEMORY_DEVICE_PRIVATE &&
+	    folio->page.pgmap->type != MEMORY_DEVICE_COHERENT)
		/*
-		 * Reset the page count to 1 to prepare for handing out the page
+		 * Reset the refcount to 1 to prepare for handing out the page
		 * again.
		 */
-		set_page_count(page, 1);
+		folio_set_count(folio, 1);
	else
-		put_dev_pagemap(page->pgmap);
+		put_dev_pagemap(folio->page.pgmap);
 }
 
 void zone_device_page_init(struct page *page)

--- a/mm/swap.c
+++ b/mm/swap.c
@@ -115,7 +115,7 @@ static void page_cache_release(struct folio *folio)
 void __folio_put(struct folio *folio)
 {
	if (unlikely(folio_is_zone_device(folio))) {
-		free_zone_device_page(&folio->page);
+		free_zone_device_folio(folio);
		return;
	} else if (folio_test_hugetlb(folio)) {
		free_huge_folio(folio);
@@ -983,7 +983,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
			if (put_devmap_managed_page_refs(&folio->page, nr_refs))
				continue;
			if (folio_ref_sub_and_test(folio, nr_refs))
-				free_zone_device_page(&folio->page);
+				free_zone_device_folio(folio);
			continue;
		}
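
Taken together, the freeing path for a ZONE_DEVICE folio now looks roughly like this (a sketch of the call chain as of this commit, eliding the devmap-managed early exit in folios_put_refs()):

	/*
	 * folio_put(folio)                       last reference dropped
	 *   -> __folio_put(folio)                folio_is_zone_device()
	 *     -> free_zone_device_folio(folio)   was free_zone_device_page()
	 *       -> pgmap->ops->page_free(folio_page(folio, 0))
	 */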