mm/hugetlb_cgroup: convert hugetlb_cgroup_uncharge_page() to folios

[ Upstream commit d4ab0316cc ]

Continue to use a folio inside free_huge_page() by converting
hugetlb_cgroup_uncharge_page*() to folios.

Link: https://lkml.kernel.org/r/20221101223059.460937-8-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Stable-dep-of: b76b46902c ("mm/hugetlb: fix missing hugetlb_lock for resv uncharge")
Signed-off-by: Sasha Levin <sashal@kernel.org>
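
For orientation, a minimal sketch (not part of the diff below) of the converted interface: the uncharge helpers now take a struct folio, and a call site that still holds a struct page converts it with page_folio() first, as alloc_huge_page() does in this patch. The helper name example_uncharge() is hypothetical and only illustrates the calling convention.

/*
 * Sketch only: the real callers hold hugetlb_lock around the uncharge,
 * and the cgroup bookkeeping is keyed by the folio rather than the page.
 */
static void example_uncharge(struct hstate *h, struct page *page)
{
	struct folio *folio = page_folio(page);	/* page -> folio */

	hugetlb_cgroup_uncharge_folio(hstate_index(h),
				      pages_per_huge_page(h), folio);
	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
					   pages_per_huge_page(h), folio);
}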
---
 3 files changed, 27 insertions(+), 25 deletions(-)

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -158,10 +158,10 @@ extern void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 extern void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
					      struct hugetlb_cgroup *h_cg,
					      struct page *page);
-extern void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					 struct page *page);
-extern void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-					      struct page *page);
+extern void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					  struct folio *folio);
+extern void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+					       struct folio *folio);
 extern void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
					   struct hugetlb_cgroup *h_cg);
@@ -254,14 +254,14 @@ hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 {
 }
-static inline void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-						struct page *page)
+static inline void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+						 struct folio *folio)
 {
 }
-static inline void hugetlb_cgroup_uncharge_page_rsvd(int idx,
+static inline void hugetlb_cgroup_uncharge_folio_rsvd(int idx,
						      unsigned long nr_pages,
-						      struct page *page)
+						      struct folio *folio)
 {
 }
 static inline void hugetlb_cgroup_uncharge_cgroup(int idx,

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1956,10 +1956,10 @@ void free_huge_page(struct page *page)
 	spin_lock_irqsave(&hugetlb_lock, flags);
 	folio_clear_hugetlb_migratable(folio);
-	hugetlb_cgroup_uncharge_page(hstate_index(h),
-				     pages_per_huge_page(h), page);
-	hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					  pages_per_huge_page(h), page);
+	hugetlb_cgroup_uncharge_folio(hstate_index(h),
+				      pages_per_huge_page(h), folio);
+	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					   pages_per_huge_page(h), folio);
 	if (restore_reserve)
 		h->resv_huge_pages++;
@@ -3082,6 +3082,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	struct hstate *h = hstate_vma(vma);
 	struct page *page;
+	struct folio *folio;
 	long map_chg, map_commit;
 	long gbl_chg;
 	int ret, idx;
@@ -3145,6 +3146,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 	 * a reservation exists for the allocation.
 	 */
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
+
 	if (!page) {
 		spin_unlock_irq(&hugetlb_lock);
 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
@@ -3159,6 +3161,7 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		set_page_refcounted(page);
 		/* Fall through */
 	}
+	folio = page_folio(page);
 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 	/* If allocation is not consuming a reservation, also store the
 	 * hugetlb_cgroup pointer on the page.
@@ -3188,8 +3191,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
 		hugetlb_acct_memory(h, -rsv_adjust);
 		if (deferred_reserve)
-			hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
-					pages_per_huge_page(h), page);
+			hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
+					pages_per_huge_page(h), folio);
 	}
 	return page;

diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -346,11 +346,10 @@ void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 /*
  * Should be called with hugetlb_lock held
  */
-static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-					   struct page *page, bool rsvd)
+static void __hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+					    struct folio *folio, bool rsvd)
 {
 	struct hugetlb_cgroup *h_cg;
-	struct folio *folio = page_folio(page);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -368,27 +367,27 @@ static void __hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
 		css_put(&h_cg->css);
 	else {
 		unsigned long usage =
-			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
+			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
 		/*
 		 * This write is not atomic due to fetching usage and writing
 		 * to it, but that's fine because we call this with
 		 * hugetlb_lock held anyway.
 		 */
-		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
+		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
 			   usage - nr_pages);
 	}
 }
 
-void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages,
-				  struct page *page)
+void hugetlb_cgroup_uncharge_folio(int idx, unsigned long nr_pages,
+				   struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, false);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, false);
 }
 
-void hugetlb_cgroup_uncharge_page_rsvd(int idx, unsigned long nr_pages,
-				       struct page *page)
+void hugetlb_cgroup_uncharge_folio_rsvd(int idx, unsigned long nr_pages,
+					struct folio *folio)
 {
-	__hugetlb_cgroup_uncharge_page(idx, nr_pages, page, true);
+	__hugetlb_cgroup_uncharge_folio(idx, nr_pages, folio, true);
 }
 
 static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
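
As a closing note on the mm/hugetlb_cgroup.c hunk above, the per-node usage accounting now derives the node id from the folio instead of the head page. A hedged sketch of that pattern, with names taken from the hunk (folio, h_cg, idx, nr_pages) and surrounding code elided:

/*
 * Sketch of the converted per-node usage update; the caller holds
 * hugetlb_lock, so the non-atomic read-modify-write below is safe.
 */
int nid = folio_nid(folio);		/* was page_to_nid(page) */
unsigned long usage = h_cg->nodeinfo[nid]->usage[idx];

WRITE_ONCE(h_cg->nodeinfo[nid]->usage[idx], usage - nr_pages);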