mm: memory: make numa_migrate_prep() take a folio

In preparation for large folio NUMA balancing, make numa_migrate_prep()
take a folio; no functional change intended.

Link: https://lkml.kernel.org/r/20230921074417.24004-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
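---
As a minimal illustration of why this is not a functional change, here is a
hedged sketch of a call site before and after the conversion (the locals and
surrounding code are illustrative, not verbatim kernel code):

	/*
	 * Before: the caller already held a folio and passed its head
	 * page; numa_migrate_prep() then pinned it with get_page(page).
	 */
	target_nid = numa_migrate_prep(&folio->page, vma, addr, nid, &flags);

	/*
	 * After: the folio is passed directly and pinned with
	 * folio_get(folio) -- the same refcount operation on the same
	 * head page, so behaviour is unchanged.
	 */
	target_nid = numa_migrate_prep(folio, vma, addr, nid, &flags);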
 mm/huge_memory.c | 2 +-
 mm/internal.h    | 2 +-
 mm/memory.c      | 9 ++++-----
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c

@@ -1556,7 +1556,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
 	 */
 	if (node_is_toptier(nid))
 		last_cpupid = page_cpupid_last(&folio->page);
-	target_nid = numa_migrate_prep(&folio->page, vma, haddr, nid, &flags);
+	target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags);
 	if (target_nid == NUMA_NO_NODE) {
 		folio_put(folio);
 		goto out_map;

diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h

@@ -983,7 +983,7 @@ void vunmap_range_noflush(unsigned long start, unsigned long end);
 void __vunmap_range_noflush(unsigned long start, unsigned long end);
 
-int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
 		      unsigned long addr, int page_nid, int *flags);
 
 void free_zone_device_page(struct page *page);

diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c

@@ -4724,10 +4724,10 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
 	return ret;
 }
 
-int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
+int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma,
 		      unsigned long addr, int page_nid, int *flags)
 {
-	get_page(page);
+	folio_get(folio);
 
 	/* Record the current PID acceesing VMA */
 	vma_set_access_pid_bit(vma);
@@ -4738,7 +4738,7 @@ int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
 		*flags |= TNF_FAULT_LOCAL;
 	}
 
-	return mpol_misplaced(page, vma, addr);
+	return mpol_misplaced(&folio->page, vma, addr);
 }
 
 static vm_fault_t do_numa_page(struct vm_fault *vmf)
@@ -4812,8 +4812,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
 		last_cpupid = (-1 & LAST_CPUPID_MASK);
 	else
 		last_cpupid = page_cpupid_last(&folio->page);
-	target_nid = numa_migrate_prep(&folio->page, vma, vmf->address, nid,
-				       &flags);
+	target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags);
 	if (target_nid == NUMA_NO_NODE) {
 		folio_put(folio);
 		goto out_map;
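
Note that numa_migrate_prep() still converts back to the head page at its
mpol_misplaced(&folio->page, ...) call, so the page-based mempolicy interface
is untouched here. For illustration only, a hypothetical caller that holds a
bare struct page (no such caller exists in this patch) could adapt to the new
signature via the existing page_folio() helper:

	/* Hypothetical: recover the owning folio from a struct page. */
	struct folio *folio = page_folio(page);

	target_nid = numa_migrate_prep(folio, vma, addr, nid, &flags);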