mm: change folio_lock_or_retry to use vm_fault directly

Change folio_lock_or_retry() to accept a struct vm_fault and return the
vm_fault_t directly.
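
In effect, the wrapper's signature goes from

	bool folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
				 unsigned int flags);

to

	vm_fault_t folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);

which lets callers fold the result straight into their fault return code,
as do_swap_page() now does:

	ret |= folio_lock_or_retry(folio, vmf);
	if (ret & VM_FAULT_RETRY)
		goto out_release;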

Link: https://lkml.kernel.org/r/20230630211957.1341547-5-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Acked-by: Peter Xu <peterx@redhat.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Hillf Danton <hdanton@sina.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Laurent Dufour <ldufour@linux.ibm.com>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Michel Lespinasse <michel@lespinasse.org>
Cc: Minchan Kim <minchan@google.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yu Zhao <yuzhao@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h

@@ -916,8 +916,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 void __folio_lock(struct folio *folio);
 int __folio_lock_killable(struct folio *folio);
-bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
-				unsigned int flags);
+vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
 void unlock_page(struct page *page);
 void folio_unlock(struct folio *folio);
@@ -1021,11 +1020,13 @@ static inline int folio_lock_killable(struct folio *folio)
  * Return value and mmap_lock implications depend on flags; see
  * __folio_lock_or_retry().
  */
-static inline bool folio_lock_or_retry(struct folio *folio,
-				       struct mm_struct *mm, unsigned int flags)
+static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
+					     struct vm_fault *vmf)
 {
 	might_sleep();
-	return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
+	if (!folio_trylock(folio))
+		return __folio_lock_or_retry(folio, vmf);
+	return 0;
 }
 
 /*
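
For callers being converted, the return convention of the hunk above maps
as follows (editor's summary, derived from the diff):

	/*
	 * old bool return		new vm_fault_t return
	 * true  (folio locked)	->	0
	 * false (not locked)	->	VM_FAULT_RETRY
	 */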

diff --git a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c
+++ b/mm/filemap.c

@@ -1669,32 +1669,34 @@ static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
 
 /*
  * Return values:
- * true - folio is locked; mmap_lock is still held.
- * false - folio is not locked.
+ * 0 - folio is locked.
+ * non-zero - folio is not locked.
  *     mmap_lock has been released (mmap_read_unlock(), unless flags had both
  *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
  *     which case mmap_lock is still held.
  *
- * If neither ALLOW_RETRY nor KILLABLE are set, will always return true
+ * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0
  * with the folio locked and the mmap_lock unperturbed.
  */
-bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
-			 unsigned int flags)
+vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
 {
+	struct mm_struct *mm = vmf->vma->vm_mm;
+	unsigned int flags = vmf->flags;
+
 	if (fault_flag_allow_retry_first(flags)) {
 		/*
 		 * CAUTION! In this case, mmap_lock is not released
-		 * even though return 0.
+		 * even though return VM_FAULT_RETRY.
 		 */
 		if (flags & FAULT_FLAG_RETRY_NOWAIT)
-			return false;
+			return VM_FAULT_RETRY;
 
 		mmap_read_unlock(mm);
 		if (flags & FAULT_FLAG_KILLABLE)
 			folio_wait_locked_killable(folio);
 		else
 			folio_wait_locked(folio);
-		return false;
+		return VM_FAULT_RETRY;
 	}
 	if (flags & FAULT_FLAG_KILLABLE) {
 		bool ret;
@@ -1702,13 +1704,13 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
 		ret = __folio_lock_killable(folio);
 		if (ret) {
 			mmap_read_unlock(mm);
-			return false;
+			return VM_FAULT_RETRY;
 		}
 	} else {
 		__folio_lock(folio);
 	}
 
-	return true;
+	return 0;
 }
 
 /**
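
A minimal caller-side sketch of the contract documented above (illustrative
only, not part of the patch):

	vm_fault_t ret = folio_lock_or_retry(folio, vmf);

	if (ret & VM_FAULT_RETRY) {
		/*
		 * mmap_lock has been released, unless vmf->flags had both
		 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set,
		 * in which case it is still held.
		 */
		return ret;
	}
	/* ret == 0: the folio is locked and mmap_lock is unperturbed. */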

diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c

@@ -3599,6 +3599,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 	struct folio *folio = page_folio(vmf->page);
 	struct vm_area_struct *vma = vmf->vma;
 	struct mmu_notifier_range range;
+	vm_fault_t ret;
 
 	/*
 	 * We need a reference to lock the folio because we don't hold
@@ -3611,9 +3612,10 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
 	if (!folio_try_get(folio))
 		return 0;
 
-	if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags)) {
+	ret = folio_lock_or_retry(folio, vmf);
+	if (ret) {
 		folio_put(folio);
-		return VM_FAULT_RETRY;
+		return ret;
 	}
 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
 				vma->vm_mm, vmf->address & PAGE_MASK,
@@ -3738,7 +3740,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 	bool exclusive = false;
 	swp_entry_t entry;
 	pte_t pte;
-	int locked;
 	vm_fault_t ret = 0;
 	void *shadow = NULL;
 
@@ -3861,12 +3862,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 		goto out_release;
 	}
 
-	locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags);
-
-	if (!locked) {
-		ret |= VM_FAULT_RETRY;
+	ret |= folio_lock_or_retry(folio, vmf);
+	if (ret & VM_FAULT_RETRY)
 		goto out_release;
-	}
 
 	if (swapcache) {
 		/*