author		Frank van der Linden <fvdl@google.com>		2023-10-04 18:32:48 +0300
committer	Andrew Morton <akpm@linux-foundation.org>	2023-10-19 00:34:17 +0300
commit		59838b2566f6d03099743675a2e0f425813078c6 (patch)
tree		18db38fad872d7c95c83822deb26c5a4fece3c8f /mm/hugetlb_cgroup.c
parent		2580d554585c52a644839864ef9238af5b030ebc (diff)
download	linux-59838b2566f6d03099743675a2e0f425813078c6.tar.xz
mm, hugetlb: remove HUGETLB_CGROUP_MIN_ORDER
Originally, hugetlb_cgroup was the only hugetlb user of tail page
structure fields. So, the code defined and checked against
HUGETLB_CGROUP_MIN_ORDER to make sure pages weren't too small to use.

However, by now, tail page #2 is used to store hugetlb hwpoison and
subpool information as well. In other words, without that tail page
hugetlb doesn't work. Acknowledge this fact by getting rid of
HUGETLB_CGROUP_MIN_ORDER and checks against it. Instead, just check
for the minimum viable page order at hstate creation time.

Link: https://lkml.kernel.org/r/20231004153248.3842997-1-fvdl@google.com
Signed-off-by: Frank van der Linden <fvdl@google.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
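The creation-time side of this change lives in mm/hugetlb.c and is
therefore outside the diffstat shown below. As a rough sketch of the
"minimum viable page order" check the message describes — the
__NR_USED_SUBPAGE name and the exact form of the assertion are
assumptions, not quoted from this commit; order_base_2() is the
existing helper from <linux/log2.h>:

/*
 * Hypothetical sketch, not part of the mm/hugetlb_cgroup.c diff below.
 * __NR_USED_SUBPAGE is assumed to count the tail struct pages that
 * hugetlb needs for metadata (cgroup, subpool, hwpoison).
 */
void __init hugetlb_add_hstate(unsigned int order)
{
	/* One central check replaces the scattered MIN_ORDER tests. */
	BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));

	/* ... existing hstate setup continues unchanged ... */
}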
Diffstat (limited to 'mm/hugetlb_cgroup.c')
-rw-r--r--	mm/hugetlb_cgroup.c	|	20	++------------------
1 file changed, 2 insertions(+), 18 deletions(-)
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index dedd2edb076e..aa4486bd3904 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -262,12 +262,6 @@ static int __hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
if (hugetlb_cgroup_disabled())
goto done;
- /*
- * We don't charge any cgroup if the compound page have less
- * than 3 pages.
- */
- if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
- goto done;
again:
rcu_read_lock();
h_cg = hugetlb_cgroup_from_task(current);
@@ -397,9 +391,6 @@ static void __hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
if (hugetlb_cgroup_disabled() || !h_cg)
return;
- if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER)
- return;
-
page_counter_uncharge(__hugetlb_cgroup_counter_from_cgroup(h_cg, idx,
rsvd),
nr_pages);
@@ -869,15 +860,8 @@ void __init hugetlb_cgroup_file_init(void)
{
struct hstate *h;
- for_each_hstate(h) {
- /*
- * Add cgroup control files only if the huge page consists
- * of more than two normal pages. This is because we use
- * page[2].private for storing cgroup details.
- */
- if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
- __hugetlb_cgroup_file_init(hstate_index(h));
- }
+ for_each_hstate(h)
+ __hugetlb_cgroup_file_init(hstate_index(h));
}
/*
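As background for the comments removed above: per the commit message,
tail page #2 now carries hwpoison and subpool state for every hugetlb
page, so any order large enough to be a hugetlb page at all is large
enough for cgroup bookkeeping too. A sketch of the struct folio overlay
for that tail page, with field names recalled from
include/linux/mm_types.h of this era rather than quoted from this patch:

struct folio {
	/* ... members overlaying pages 0 and 1 elided ... */

	/* page 2 */
	unsigned long _flags_2;
	unsigned long _head_2;
	void *_hugetlb_subpool;		/* per-hstate subpool */
	void *_hugetlb_cgroup;		/* cgroup charged for the page */
	void *_hugetlb_cgroup_rsvd;	/* cgroup charged for the reservation */
	void *_hugetlb_hwpoison;	/* hwpoison bookkeeping */
};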