Diffstat (limited to 'include/linux/hugetlb.h')
-rw-r--r--	include/linux/hugetlb.h	22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index b65166de1d9d..255cd5cc0754 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -343,6 +343,11 @@ static inline unsigned huge_page_shift(struct hstate *h)
 	return h->order + PAGE_SHIFT;
 }
 
+static inline bool hstate_is_gigantic(struct hstate *h)
+{
+	return huge_page_order(h) >= MAX_ORDER;
+}
+
 static inline unsigned int pages_per_huge_page(struct hstate *h)
 {
 	return 1 << h->order;
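
For illustration only, not part of the patch above: hstate_is_gigantic() gives callers a named test for "this hstate describes a page whose order is at or beyond MAX_ORDER", replacing open-coded comparisons. A minimal, hypothetical caller sketch (the function name alloc_one_hugepage and the GFP flags are made up for this example):

	/* Hypothetical caller: gigantic hstates cannot be served by the buddy allocator. */
	static struct page *alloc_one_hugepage(struct hstate *h, int nid)
	{
		if (hstate_is_gigantic(h))
			return NULL;	/* would need a different allocation path */
		return alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE | __GFP_COMP,
					huge_page_order(h));
	}
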
@@ -392,15 +397,13 @@ static inline pgoff_t basepage_index(struct page *page)
 extern void dissolve_free_huge_pages(unsigned long start_pfn,
 				     unsigned long end_pfn);
-int pmd_huge_support(void);
-/*
- * Currently hugepage migration is enabled only for pmd-based hugepage.
- * This function will be updated when hugepage migration is more widely
- * supported.
- */
-static inline int hugepage_migration_support(struct hstate *h)
+static inline int hugepage_migration_supported(struct hstate *h)
 {
-	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
+#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+	return huge_page_shift(h) == PMD_SHIFT;
+#else
+	return 0;
+#endif
 }
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 					   struct mm_struct *mm, pte_t *pte)
@@ -450,8 +453,7 @@ static inline pgoff_t basepage_index(struct page *page)
 	return page->index;
 }
 #define dissolve_free_huge_pages(s, e)	do {} while (0)
-#define pmd_huge_support()	0
-#define hugepage_migration_support(h)	0
+#define hugepage_migration_supported(h)	0
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 					   struct mm_struct *mm, pte_t *pte)
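
For illustration only: after this change, migration code can simply ask hugepage_migration_supported() for the hstate of the page it is about to move, and the CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION check stays hidden inside the helper (with the !CONFIG_HUGETLB_PAGE stub above always returning 0). A hypothetical caller sketch (migrate_one_hugepage and the -ENOSYS convention here are illustrative, not taken from this diff):

	/* Hypothetical caller: refuse to migrate hstates the architecture cannot handle. */
	static int migrate_one_hugepage(struct page *hpage)
	{
		struct hstate *h = page_hstate(hpage);

		if (!hugepage_migration_supported(h))
			return -ENOSYS;
		/* ... isolate and move the page here ... */
		return 0;
	}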