From 59838b2566f6d03099743675a2e0f425813078c6 Mon Sep 17 00:00:00 2001
From: Frank van der Linden
Date: Wed, 4 Oct 2023 15:32:48 +0000
Subject: [PATCH] mm, hugetlb: remove HUGETLB_CGROUP_MIN_ORDER

Originally, hugetlb_cgroup was the only hugetlb user of tail page
structure fields. So, the code defined and checked against
HUGETLB_CGROUP_MIN_ORDER to make sure pages weren't too small to use.

However, by now, tail page #2 is used to store hugetlb hwpoison and
subpool information as well. In other words, without that tail page
hugetlb doesn't work.

Acknowledge this fact by getting rid of HUGETLB_CGROUP_MIN_ORDER and
checks against it. Instead, just check for the minimum viable page
order at hstate creation time.

Link: https://lkml.kernel.org/r/20231004153248.3842997-1-fvdl@google.com
Signed-off-by: Frank van der Linden
Reviewed-by: Mike Kravetz
Cc: Muchun Song
Signed-off-by: Andrew Morton
---
 include/linux/hugetlb_cgroup.h | 11 -----------
 mm/hugetlb.c                   |  2 +-
 mm/hugetlb_cgroup.c            | 20 ++------------------
 3 files changed, 3 insertions(+), 30 deletions(-)

diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 3d82d91f49ac..e5d64b8b59c2 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -22,13 +22,6 @@ struct resv_map;
 struct file_region;
 
 #ifdef CONFIG_CGROUP_HUGETLB
-/*
- * Minimum page order trackable by hugetlb cgroup.
- * At least 3 pages are necessary for all the tracking information.
- * The second tail page contains all of the hugetlb-specific fields.
- */
-#define HUGETLB_CGROUP_MIN_ORDER	order_base_2(__NR_USED_SUBPAGE)
-
 enum hugetlb_memory_event {
 	HUGETLB_MAX,
 	HUGETLB_NR_MEMORY_EVENTS,
@@ -68,8 +61,6 @@ static inline struct hugetlb_cgroup *
 __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
 {
 	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
-	if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
-		return NULL;
 	if (rsvd)
 		return folio->_hugetlb_cgroup_rsvd;
 	else
@@ -91,8 +82,6 @@ static inline void __set_hugetlb_cgroup(struct folio *folio,
 					 struct hugetlb_cgroup *h_cg, bool rsvd)
 {
 	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
-	if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
-		return;
 	if (rsvd)
 		folio->_hugetlb_cgroup_rsvd = h_cg;
 	else
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7ad9d2159da4..e2b1c417b90a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4361,7 +4361,7 @@ void __init hugetlb_add_hstate(unsigned int order)
 		return;
 	}
 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
-	BUG_ON(order == 0);
+	BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
 	h = &hstates[hugetlb_max_hstate++];
 	mutex_init(&h->resize_lock);
 	h->order = order;
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index dedd2edb076e..aa4486bd3904 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -262,12 +262,6 @@ static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
 
 	if (hugetlb_cgroup_disabled())
 		goto done;
-	/*
-	 * We don't charge any cgroup if the compound page have less
-	 * than 3 pages.
-	 */
-	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
-		goto done;
 again:
 	rcu_read_lock();
 	h_cg = hugetlb_cgroup_from_task(current);
@@ -397,9 +391,6 @@ static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
 	if (hugetlb_cgroup_disabled() || !h_cg)
 		return;
 
-	if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
-		return;
-
 	page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
 								   rsvd),
 			      nr_pages);
@@ -869,15 +860,8 @@ void __init hugetlb_cgroup_file_init(void)
 {
 	struct hstate *h;
 
-	for_each_hstate(h) {
-		/*
-		 * Add cgroup control files only if the huge page consists
-		 * of more than two normal pages. This is because we use
-		 * page[2].private for storing cgroup details.
-		 */
-		if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
-			__hugetlb_cgroup_file_init(hstate_index(h));
-	}
+	for_each_hstate(h)
+		__hugetlb_cgroup_file_init(hstate_index(h));
 }
 
 /*
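
Note (not part of the patch): a quick userspace sketch of the arithmetic behind the
replacement BUG_ON in hugetlb_add_hstate(). It assumes __NR_USED_SUBPAGE is 3 (head
page plus the first two tail pages), as implied by the removed "at least 3 pages"
comment, and uses a local stand-in for the kernel's order_base_2() macro from
include/linux/log2.h, which rounds up to the next power-of-two order.

#include <assert.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's order_base_2(): ceil(log2(n)). */
static unsigned int order_base_2(unsigned long n)
{
	unsigned int order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}

int main(void)
{
	/* Assumed value: 3 used subpages (head + two tail pages). */
	unsigned long nr_used_subpage = 3;
	unsigned int min_order = order_base_2(nr_used_subpage);

	/* Order 2 (4 pages) is the smallest order covering 3 used subpages. */
	printf("minimum hstate order: %u (%lu pages)\n",
	       min_order, 1UL << min_order);
	assert(min_order == 2);

	/* The patch's check, e.g. for a 2MB huge page on x86 (order 9). */
	unsigned int order = 9;
	assert(!(order < min_order));	/* would be BUG_ON() in the kernel */
	return 0;
}

With this minimum enforced once at hstate creation, every registered hstate is large
enough for the cgroup, hwpoison and subpool fields, so the per-call order checks the
patch deletes become unnecessary.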