mm/filemap: Add folio_wait_locked()
Also add folio_wait_locked_killable(). Turn wait_on_page_locked() and wait_on_page_locked_killable() into wrappers. This eliminates a call to compound_head() from each call-site, reducing text size by 193 bytes for me. Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Christoph Hellwig <hch@lst.de> Acked-by: Jeff Layton <jlayton@kernel.org> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Reviewed-by: William Kucharski <william.kucharski@oracle.com> Reviewed-by: David Howells <dhowells@redhat.com> Acked-by: Mike Rapoport <rppt@linux.ibm.com>
This commit is contained in:
parent
ffdc8dabf2
commit
6baa8d602e
|
@ -732,23 +732,33 @@ extern void wait_on_page_bit(struct page *page, int bit_nr);
|
||||||
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
|
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Wait for a page to be unlocked.
|
* Wait for a folio to be unlocked.
|
||||||
*
|
*
|
||||||
* This must be called with the caller "holding" the page,
|
* This must be called with the caller "holding" the folio,
|
||||||
* ie with increased "page->count" so that the page won't
|
* ie with increased "page->count" so that the folio won't
|
||||||
* go away during the wait..
|
* go away during the wait..
|
||||||
*/
|
*/
|
||||||
|
static inline void folio_wait_locked(struct folio *folio)
|
||||||
|
{
|
||||||
|
if (folio_test_locked(folio))
|
||||||
|
wait_on_page_bit(&folio->page, PG_locked);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int folio_wait_locked_killable(struct folio *folio)
|
||||||
|
{
|
||||||
|
if (!folio_test_locked(folio))
|
||||||
|
return 0;
|
||||||
|
return wait_on_page_bit_killable(&folio->page, PG_locked);
|
||||||
|
}
|
||||||
|
|
||||||
/*
 * wait_on_page_locked - Legacy page-based wrapper around folio_wait_locked().
 *
 * Converts the page to its folio once here, so callers avoid a separate
 * compound_head() lookup. Caller must hold a reference on the page.
 */
static inline void wait_on_page_locked(struct page *page)
{
	struct folio *folio = page_folio(page);

	folio_wait_locked(folio);
}
|
|
||||||
/*
 * wait_on_page_locked_killable - Legacy page-based wrapper around
 * folio_wait_locked_killable().
 *
 * Resolves the page's folio once and delegates; returns 0 on success or a
 * negative errno if interrupted by a fatal signal. Caller must hold a
 * reference on the page.
 */
static inline int wait_on_page_locked_killable(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_wait_locked_killable(folio);
}
|
||||||
|
|
||||||
int put_and_wait_on_page_locked(struct page *page, int state);
|
int put_and_wait_on_page_locked(struct page *page, int state);
|
||||||
|
|
|
@ -1704,9 +1704,9 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
|
||||||
|
|
||||||
mmap_read_unlock(mm);
|
mmap_read_unlock(mm);
|
||||||
if (flags & FAULT_FLAG_KILLABLE)
|
if (flags & FAULT_FLAG_KILLABLE)
|
||||||
wait_on_page_locked_killable(page);
|
folio_wait_locked_killable(folio);
|
||||||
else
|
else
|
||||||
wait_on_page_locked(page);
|
folio_wait_locked(folio);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
if (flags & FAULT_FLAG_KILLABLE) {
|
if (flags & FAULT_FLAG_KILLABLE) {
|
||||||
|
|
Loading…
Reference in New Issue