author    Miaohe Lin <linmiaohe@huawei.com>  2021-05-05 04:33:22 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-05-05 21:27:20 +0300
commit    04adbc3f7bff403a97355531da0190a263d66ea5 (patch)
tree      f6e3e2d3051a250fd1346fcb1cef0571ae4bf46e /mm/hugetlb.c
parent    4bfb68a0858deae4c40ea585037a3261f0717b0a (diff)
download  linux-04adbc3f7bff403a97355531da0190a263d66ea5.tar.xz
mm/hugetlb: use some helper functions to cleanup code
Patch series "Some cleanups for hugetlb". This series contains cleanups to remove unnecessary VM_BUG_ON_PAGE, use helper function and so on. I also collect some previous patches into this series in case they are forgotten. This patch (of 5): We could use pages_per_huge_page to get the number of pages per hugepage, use get_hstate_idx to calculate hstate index, and use hstate_is_gigantic to check if a hstate is gigantic to make code more succinct. Link: https://lkml.kernel.org/r/20210308112809.26107-1-linmiaohe@huawei.com Link: https://lkml.kernel.org/r/20210308112809.26107-2-linmiaohe@huawei.com Signed-off-by: Miaohe Lin <linmiaohe@huawei.com> Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
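
For context, the helpers named above are small inline functions in the hugetlb
headers. The sketch below approximates their definitions (simplified; the exact
field names and their placement in include/linux/hugetlb.h are assumptions here)
to show why the replacements in the diff are equivalent to the open-coded
expressions they remove:

/* Approximate sketches of the hugetlb helpers, for illustration only. */
static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;			/* order of one huge page */
}

static inline unsigned long pages_per_huge_page(struct hstate *h)
{
	/* same value as the open-coded 1UL << huge_page_order(h) */
	return 1UL << huge_page_order(h);
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	/* "gigantic" pages are too large to come from the buddy allocator */
	return huge_page_order(h) >= MAX_ORDER;
}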
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c0d4c7784a84..87104dc78e59 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1273,7 +1273,7 @@ static void free_gigantic_page(struct page *page, unsigned int order)
 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
 		int nid, nodemask_t *nodemask)
 {
-	unsigned long nr_pages = 1UL << huge_page_order(h);
+	unsigned long nr_pages = pages_per_huge_page(h);
 
 	if (nid == NUMA_NO_NODE)
 		nid = numa_mem_id();
@@ -3267,10 +3267,10 @@ static int __init hugepages_setup(char *s)
 
 	/*
 	 * Global state is always initialized later in hugetlb_init.
-	 * But we need to allocate >= MAX_ORDER hstates here early to still
+	 * But we need to allocate gigantic hstates here early to still
 	 * use the bootmem allocator.
 	 */
-	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
+	if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
 		hugetlb_hstate_alloc_pages(parsed_hstate);
 
 	last_mhp = mhp;