mm: convert copy_user_huge_page() to copy_user_large_folio()

Replace copy_user_huge_page() with copy_user_large_folio(). 
copy_user_large_folio() does the same as copy_user_huge_page(), but takes
in folios instead of pages.  Remove pages_per_huge_page from
copy_user_large_folio(), because we can get that from folio_nr_pages(dst).

Convert copy_user_gigantic_page() to take in folios.

Link: https://lkml.kernel.org/r/20230410133932.32288-6-zhangpeng362@huawei.com
Signed-off-by: ZhangPeng <zhangpeng362@huawei.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Nanyong Sun <sunnanyong@huawei.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
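For illustration, a minimal before/after sketch of a call site (hypothetical
caller; the real conversions are in the mm/hugetlb.c hunks below):

/*
 * Hypothetical call-site sketch, not part of the patch.  dst_folio,
 * src_page, addr, vma and h stand in for whatever the caller has on hand.
 */

/* Before: raw pages plus an explicit size were passed. */
copy_user_huge_page(&dst_folio->page, src_page, addr, vma,
		    pages_per_huge_page(h));

/* After: folios are passed; the size is derived internally via
 * folio_nr_pages(dst), so no size argument is needed.
 */
copy_user_large_folio(dst_folio, page_folio(src_page), addr, vma);

Dropping the explicit size means a caller can no longer pass a count that
disagrees with the destination folio.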

diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h

@@ -3677,10 +3677,9 @@ extern const struct attribute_group memory_failure_attr_group;
 extern void clear_huge_page(struct page *page,
			    unsigned long addr_hint,
			    unsigned int pages_per_huge_page);
-extern void copy_user_huge_page(struct page *dst, struct page *src,
-				unsigned long addr_hint,
-				struct vm_area_struct *vma,
-				unsigned int pages_per_huge_page);
+void copy_user_large_folio(struct folio *dst, struct folio *src,
+			   unsigned long addr_hint,
+			   struct vm_area_struct *vma);
 long copy_folio_from_user(struct folio *dst_folio,
			   const void __user *usr_src,
			   bool allow_pagefault);

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c

@@ -5097,8 +5097,9 @@ again:
				ret = PTR_ERR(new_folio);
				break;
			}
-			copy_user_huge_page(&new_folio->page, ptepage, addr, dst_vma,
-					    npages);
+			copy_user_large_folio(new_folio,
+					      page_folio(ptepage),
+					      addr, dst_vma);
			put_page(ptepage);

			/* Install the new hugetlb folio if src pte stable */
@@ -5616,8 +5617,7 @@ retry_avoidcopy:
		goto out_release_all;
	}

-	copy_user_huge_page(&new_folio->page, old_page, address, vma,
-			    pages_per_huge_page(h));
+	copy_user_large_folio(new_folio, page_folio(old_page), address, vma);
	__folio_mark_uptodate(new_folio);

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
@@ -6260,8 +6260,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			*foliop = NULL;
			goto out;
		}
-		copy_user_huge_page(&folio->page, &(*foliop)->page, dst_addr, dst_vma,
-				    pages_per_huge_page(h));
+		copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
		folio_put(*foliop);
		*foliop = NULL;
	}

diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c

@@ -5815,21 +5815,21 @@ void clear_huge_page(struct page *page,
	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
}

-static void copy_user_gigantic_page(struct page *dst, struct page *src,
-				    unsigned long addr,
-				    struct vm_area_struct *vma,
-				    unsigned int pages_per_huge_page)
+static void copy_user_gigantic_page(struct folio *dst, struct folio *src,
+				    unsigned long addr,
+				    struct vm_area_struct *vma,
+				    unsigned int pages_per_huge_page)
{
	int i;
-	struct page *dst_base = dst;
-	struct page *src_base = src;
+	struct page *dst_page;
+	struct page *src_page;

	for (i = 0; i < pages_per_huge_page; i++) {
-		dst = nth_page(dst_base, i);
-		src = nth_page(src_base, i);
+		dst_page = folio_page(dst, i);
+		src_page = folio_page(src, i);

		cond_resched();
-		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+		copy_user_highpage(dst_page, src_page, addr + i*PAGE_SIZE, vma);
	}
}
@@ -5847,15 +5847,15 @@ static void copy_subpage(unsigned long addr, int idx, void *arg)
			   addr, copy_arg->vma);
}

-void copy_user_huge_page(struct page *dst, struct page *src,
-			 unsigned long addr_hint, struct vm_area_struct *vma,
-			 unsigned int pages_per_huge_page)
+void copy_user_large_folio(struct folio *dst, struct folio *src,
+			   unsigned long addr_hint, struct vm_area_struct *vma)
{
+	unsigned int pages_per_huge_page = folio_nr_pages(dst);
	unsigned long addr = addr_hint &
		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
	struct copy_subpage_arg arg = {
-		.dst = dst,
-		.src = src,
+		.dst = &dst->page,
+		.src = &src->page,
		.vma = vma,
	};