mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-27 21:03:32 +00:00
hugetlb: check for hugetlb folio before vmemmap_restore
In commit d8f5f7e445
("hugetlb: set hugetlb page flag before optimizing vmemmap") checks were added to print a warning if hugetlb_vmemmap_restore was called on a non-hugetlb page. This was mostly due to ordering issues in the hugetlb page set up and tear down sequences. One place missed was the routine dissolve_free_huge_page. Naoya Horiguchi noted: "I saw that VM_WARN_ON_ONCE() in hugetlb_vmemmap_restore is triggered when memory_failure() is called on a free hugetlb page with vmemmap optimization disabled (the warning is not triggered if vmemmap optimization is enabled). I think that we need check folio_test_hugetlb() before dissolve_free_huge_page() calls hugetlb_vmemmap_restore_folio()." Perform the check as suggested by Naoya. Link: https://lkml.kernel.org/r/20231017032140.GA3680@monkey Fixes: d8f5f7e445
("hugetlb: set hugetlb page flag before optimizing vmemmap") Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com> Suggested-by: Naoya Horiguchi <naoya.horiguchi@linux.dev> Tested-by: Naoya Horiguchi <naoya.horiguchi@linux.dev> Cc: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Barry Song <song.bao.hua@hisilicon.com> Cc: David Hildenbrand <david@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: Joao Martins <joao.m.martins@oracle.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Miaohe Lin <linmiaohe@huawei.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Muchun Song <songmuchun@bytedance.com> Cc: Oscar Salvador <osalvador@suse.de> Cc: Xiongchun Duan <duanxiongchun@bytedance.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
5ef8f1b2b4
commit
30a89adf87
1 changed file with 15 additions and 9 deletions
24
mm/hugetlb.c
24
mm/hugetlb.c
|
@ -2322,17 +2322,23 @@ int dissolve_free_huge_page(struct page *page)
|
|||
* need to adjust max_huge_pages if the page is not freed.
|
||||
* Attempt to allocate vmemmmap here so that we can take
|
||||
* appropriate action on failure.
|
||||
*
|
||||
* The folio_test_hugetlb check here is because
|
||||
* remove_hugetlb_folio will clear hugetlb folio flag for
|
||||
* non-vmemmap optimized hugetlb folios.
|
||||
*/
|
||||
rc = hugetlb_vmemmap_restore(h, &folio->page);
|
||||
if (!rc) {
|
||||
update_and_free_hugetlb_folio(h, folio, false);
|
||||
} else {
|
||||
spin_lock_irq(&hugetlb_lock);
|
||||
add_hugetlb_folio(h, folio, false);
|
||||
h->max_huge_pages++;
|
||||
spin_unlock_irq(&hugetlb_lock);
|
||||
}
|
||||
if (folio_test_hugetlb(folio)) {
|
||||
rc = hugetlb_vmemmap_restore(h, &folio->page);
|
||||
if (rc) {
|
||||
spin_lock_irq(&hugetlb_lock);
|
||||
add_hugetlb_folio(h, folio, false);
|
||||
h->max_huge_pages++;
|
||||
goto out;
|
||||
}
|
||||
} else
|
||||
rc = 0;
|
||||
|
||||
update_and_free_hugetlb_folio(h, folio, false);
|
||||
return rc;
|
||||
}
|
||||
out:
|
||||
|
|
Loading…
Reference in a new issue