mm: convert migrate_vma_insert_page() to use a folio

Replaces five calls to compound_head() with one.

Link: https://lkml.kernel.org/r/20231211162214.2146080-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 1 file changed, 12 insertions(+), 11 deletions(-)

@@ -564,6 +564,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 				    struct page *page,
 				    unsigned long *src)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma = migrate->vma;
 	struct mm_struct *mm = vma->vm_mm;
 	bool flush = false;
@@ -596,17 +597,17 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 		goto abort;
 	if (unlikely(anon_vma_prepare(vma)))
 		goto abort;
-	if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL))
+	if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
 		goto abort;
 
 	/*
-	 * The memory barrier inside __SetPageUptodate makes sure that
-	 * preceding stores to the page contents become visible before
+	 * The memory barrier inside __folio_mark_uptodate makes sure that
+	 * preceding stores to the folio contents become visible before
 	 * the set_pte_at() write.
 	 */
-	__SetPageUptodate(page);
+	__folio_mark_uptodate(folio);
 
-	if (is_device_private_page(page)) {
+	if (folio_is_device_private(folio)) {
 		swp_entry_t swp_entry;
 
 		if (vma->vm_flags & VM_WRITE)
@@ -617,8 +618,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 					page_to_pfn(page));
 		entry = swp_entry_to_pte(swp_entry);
 	} else {
-		if (is_zone_device_page(page) &&
-		    !is_device_coherent_page(page)) {
+		if (folio_is_zone_device(folio) &&
+		    !folio_is_device_coherent(folio)) {
 			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
 			goto abort;
 		}
@@ -652,10 +653,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 		goto unlock_abort;
 
 	inc_mm_counter(mm, MM_ANONPAGES);
-	page_add_new_anon_rmap(page, vma, addr);
-	if (!is_zone_device_page(page))
-		lru_cache_add_inactive_or_unevictable(page, vma);
-	get_page(page);
+	folio_add_new_anon_rmap(folio, vma, addr);
+	if (!folio_is_zone_device(folio))
+		folio_add_lru_vma(folio, vma);
+	folio_get(folio);
 
 	if (flush) {
 		flush_cache_page(vma, addr, pte_pfn(orig_pte));
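
For readers unfamiliar with the folio conversion pattern, here is a minimal
sketch of the saving the commit message describes. It is a toy userspace
model, not kernel source: struct page, compound_head() and the helpers below
are simplified stand-ins with invented bodies, and the real kernel helpers
differ in signature and behaviour. The point is only that each page-based
helper must re-derive the head page internally, whereas the folio variants
take the already-resolved folio, so the derivation happens once in
page_folio().

/*
 * Toy model only -- NOT kernel source. Demonstrates why caching
 * "struct folio *folio = page_folio(page);" saves repeated
 * compound_head() lookups across several helper calls.
 */
#include <stdio.h>

struct page {
	struct page *head;	/* NULL for a head page, else the head */
	int uptodate;
};

static int head_lookups;	/* counts head-page derivations */

/* Resolve a (possibly tail) page to its head; the "folio" in this model. */
static struct page *compound_head(struct page *page)
{
	head_lookups++;
	return page->head ? page->head : page;
}

#define page_folio(p)	compound_head(p)

/* Page-based helpers: each call pays for its own head lookup. */
static void __SetPageUptodate(struct page *page)
{
	compound_head(page)->uptodate = 1;
}

static void get_page(struct page *page)
{
	(void)compound_head(page);	/* refcount lives in the head */
}

/* Folio-based helpers: the caller already resolved the head. */
static void __folio_mark_uptodate(struct page *folio)
{
	folio->uptodate = 1;
}

static void folio_get(struct page *folio)
{
	(void)folio;			/* refcount is right here */
}

int main(void)
{
	struct page head = { 0 };
	struct page tail = { .head = &head };

	head_lookups = 0;
	__SetPageUptodate(&tail);
	get_page(&tail);
	printf("page helpers:  %d head lookups\n", head_lookups);	/* 2 */

	head_lookups = 0;
	struct page *folio = page_folio(&tail);	/* one lookup, reused */
	__folio_mark_uptodate(folio);
	folio_get(folio);
	printf("folio helpers: %d head lookup\n", head_lookups);	/* 1 */
	return 0;
}

In the patch above, the one remaining derivation is the page_folio(page) at
the top of the function. Note that page_to_pfn(page) is deliberately left
untouched: a pfn is a property of the individual page, not of the folio.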