migrate: convert unmap_and_move_huge_page() to use folios

Saves several calls to compound_head() and removes a couple of uses of
page->lru.
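
The saving is mechanical: page_count() must first resolve the head page of a
compound page via compound_head(), whereas a struct folio is by definition
never a tail page, so folio_ref_count() reads the refcount directly.  A toy
userspace model of the difference (the helper names mirror the kernel's, but
the struct layout here is invented for illustration and is not kernel source):

#include <stdio.h>

struct page {
	unsigned long compound_head;	/* tail page: head pointer with bit 0 set */
	int _refcount;
};

struct folio { struct page page; };	/* a folio is never a tail page */

static struct page *compound_head(struct page *page)
{
	unsigned long head = page->compound_head;

	if (head & 1)			/* tail page: follow the head pointer */
		return (struct page *)(head - 1);
	return page;
}

static int page_count(struct page *page)
{
	/* every call pays for the head-page lookup */
	return compound_head(page)->_refcount;
}

static int folio_ref_count(struct folio *folio)
{
	/* direct read: the head was resolved once, when the folio was made */
	return folio->page._refcount;
}

int main(void)
{
	struct folio head = { .page = { ._refcount = 1 } };
	struct page tail = { .compound_head = (unsigned long)&head.page | 1 };

	printf("page_count(tail)      = %d\n", page_count(&tail));
	printf("folio_ref_count(head) = %d\n", folio_ref_count(&head));
	return 0;
}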

Link: https://lkml.kernel.org/r/20220902194653.1739778-52-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 682a71a1b6
commit c33db29231
1 changed file with 14 additions and 14 deletions

@@ -1263,7 +1263,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (!hugepage_migration_supported(page_hstate(hpage)))
 		return -ENOSYS;
 
-	if (page_count(hpage) == 1) {
+	if (folio_ref_count(src) == 1) {
 		/* page was freed from under us. So we are done. */
 		putback_active_hugepage(hpage);
 		return MIGRATEPAGE_SUCCESS;
@@ -1274,7 +1274,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		return -ENOMEM;
 	dst = page_folio(new_hpage);
 
-	if (!trylock_page(hpage)) {
+	if (!folio_trylock(src)) {
 		if (!force)
 			goto out;
 		switch (mode) {
@@ -1284,29 +1284,29 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		default:
 			goto out;
 		}
-		lock_page(hpage);
+		folio_lock(src);
 	}
 
 	/*
 	 * Check for pages which are in the process of being freed. Without
-	 * page_mapping() set, hugetlbfs specific move page routine will not
+	 * folio_mapping() set, hugetlbfs specific move page routine will not
 	 * be called and we could leak usage counts for subpools.
 	 */
-	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
+	if (hugetlb_page_subpool(hpage) && !folio_mapping(src)) {
 		rc = -EBUSY;
 		goto out_unlock;
 	}
 
-	if (PageAnon(hpage))
-		anon_vma = page_get_anon_vma(hpage);
+	if (folio_test_anon(src))
+		anon_vma = page_get_anon_vma(&src->page);
 
-	if (unlikely(!trylock_page(new_hpage)))
+	if (unlikely(!folio_trylock(dst)))
 		goto put_anon;
 
-	if (page_mapped(hpage)) {
+	if (folio_mapped(src)) {
 		enum ttu_flags ttu = 0;
 
-		if (!PageAnon(hpage)) {
+		if (!folio_test_anon(src)) {
 			/*
 			 * In shared mappings, try_to_unmap could potentially
 			 * call huge_pmd_unshare. Because of this, take
@@ -1327,7 +1327,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 			i_mmap_unlock_write(mapping);
 	}
 
-	if (!page_mapped(hpage))
+	if (!folio_mapped(src))
 		rc = move_to_new_folio(dst, src, mode);
 
 	if (page_was_mapped)
@@ -1335,7 +1335,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
 
 unlock_put_anon:
-	unlock_page(new_hpage);
+	folio_unlock(dst);
 
 put_anon:
 	if (anon_vma)
@@ -1347,12 +1347,12 @@ put_anon:
 	}
 
 out_unlock:
-	unlock_page(hpage);
+	folio_unlock(src);
 out:
 	if (rc == MIGRATEPAGE_SUCCESS)
 		putback_active_hugepage(hpage);
 	else if (rc != -EAGAIN)
-		list_move_tail(&hpage->lru, ret);
+		list_move_tail(&src->lru, ret);
 
 	/*
 	 * If migration was not successful and there's a freeing callback, use