hugetlb: move vm_fault declaration to the top of hugetlb_fault()

hugetlb_fault() currently defines a vm_fault to pass to the generic
handle_userfault() function.  We can move this definition to the top of
hugetlb_fault() so that it can be used throughout the rest of the hugetlb
fault path.

This will help clean up a number of excess variables and function arguments
throughout the stack.  Also, since vm_fault already has space to store the
page offset, use that instead and get rid of idx.
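
For illustration only (abbreviated from the diff below; not part of the
patch itself), the fault path ends up shaped roughly like this:

	vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
	{
		u32 hash;
		struct address_space *mapping;
		struct hstate *h = hstate_vma(vma);
		unsigned long haddr = address & huge_page_mask(h);
		/* one vm_fault at the top, visible to the whole fault path */
		struct vm_fault vmf = {
			.vma = vma,
			.address = haddr,
			.real_address = address,
			.flags = flags,
			.pgoff = vma_hugecache_offset(h, vma, haddr),
		};

		...
		/* later code reads vmf.pgoff instead of a local idx */
		mapping = vma->vm_file->f_mapping;
		hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
		...
	}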

Link: https://lkml.kernel.org/r/20240221234732.187629-3-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 mm/hugetlb.c | 32 +++++++++++++++++++-------------
 1 file changed, 19 insertions(+), 13 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6378,13 +6378,25 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	vm_fault_t ret;
 	u32 hash;
-	pgoff_t idx;
 	struct folio *folio = NULL;
 	struct folio *pagecache_folio = NULL;
 	struct hstate *h = hstate_vma(vma);
 	struct address_space *mapping;
 	int need_wait_lock = 0;
 	unsigned long haddr = address & huge_page_mask(h);
+	struct vm_fault vmf = {
+		.vma = vma,
+		.address = haddr,
+		.real_address = address,
+		.flags = flags,
+		.pgoff = vma_hugecache_offset(h, vma, haddr),
+		/* TODO: Track hugetlb faults using vm_fault */
+
+		/*
+		 * Some fields may not be initialized, be careful as it may
+		 * be hard to debug if called functions make assumptions
+		 */
+	};
 
 	/* TODO: Handle faults under the VMA lock */
 	if (flags & FAULT_FLAG_VMA_LOCK) {
@@ -6398,8 +6410,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * the same page in the page cache.
 	 */
 	mapping = vma->vm_file->f_mapping;
-	idx = vma_hugecache_offset(h, vma, haddr);
-	hash = hugetlb_fault_mutex_hash(mapping, idx);
+	hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	/*
@@ -6433,8 +6444,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * hugetlb_no_page will drop vma lock and hugetlb fault
 		 * mutex internally, which make us return immediately.
 		 */
-		return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
-				      entry, flags);
+
+		return hugetlb_no_page(mm, vma, mapping, vmf.pgoff, address,
+					ptep, entry, flags);
 	}
 
 	ret = 0;
@@ -6480,7 +6492,8 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		/* Just decrements count, does not deallocate */
 		vma_end_reservation(h, vma, haddr);
 
-		pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, idx);
+		pagecache_folio = filemap_lock_hugetlb_folio(h, mapping,
+							vmf.pgoff);
 		if (IS_ERR(pagecache_folio))
 			pagecache_folio = NULL;
 	}
@@ -6495,13 +6508,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
 	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
 		if (!userfaultfd_wp_async(vma)) {
-			struct vm_fault vmf = {
-				.vma = vma,
-				.address = haddr,
-				.real_address = address,
-				.flags = flags,
-			};
-
 			spin_unlock(ptl);
 			if (pagecache_folio) {
 				folio_unlock(pagecache_folio);