mm: convert lock_page_or_retry() to folio_lock_or_retry()

Remove a call to compound_head() in each of the two callers.
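Why this saves work: page_folio() boils down to compound_head(), which
maps a tail page of a compound page back to its head page, so the old
page-based wrapper performed that lookup on every call.  A simplified
illustration (the kernel's real page_folio() is a type-checking macro,
not a plain function like this):

	/* Simplified sketch, not the kernel's actual definition. */
	static inline struct folio *sketch_page_folio(struct page *page)
	{
		/* compound_head() resolves a tail page to its head page */
		return (struct folio *)compound_head(page);
	}

With folio_lock_or_retry() taking a folio directly, a caller can resolve
the folio once and reuse it for every subsequent folio operation.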

Link: https://lkml.kernel.org/r/20220902194653.1739778-58-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -989,19 +989,16 @@ static inline int lock_page_killable(struct page *page)
 }
 
 /*
- * lock_page_or_retry - Lock the page, unless this would block and the
+ * folio_lock_or_retry - Lock the folio, unless this would block and the
  * caller indicated that it can handle a retry.
  *
  * Return value and mmap_lock implications depend on flags; see
  * __folio_lock_or_retry().
  */
-static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
-				     unsigned int flags)
+static inline bool folio_lock_or_retry(struct folio *folio,
+		struct mm_struct *mm, unsigned int flags)
 {
-	struct folio *folio;
-
 	might_sleep();
 
-	folio = page_folio(page);
 	return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
 }

diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3618,11 +3618,11 @@ EXPORT_SYMBOL(unmap_mapping_range);
  */
 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 {
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct vm_area_struct *vma = vmf->vma;
 	struct mmu_notifier_range range;
 
-	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
+	if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
 		return VM_FAULT_RETRY;
 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
 				vma->vm_mm, vmf->address & PAGE_MASK,
@@ -3632,10 +3632,10 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
 				&vmf->ptl);
 	if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
-		restore_exclusive_pte(vma, page, vmf->address, vmf->pte);
+		restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
 
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
-	unlock_page(page);
+	folio_unlock(folio);
 	mmu_notifier_invalidate_range_end(&range);
 
 	return 0;
@@ -3835,7 +3835,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		goto out_release;
 	}
 
-	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
+	locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags);
 
 	if (!locked) {
 		ret |= VM_FAULT_RETRY;