Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h | 82
1 file changed, 25 insertions(+), 57 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 906c46a05707..00bad7793788 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -430,46 +430,6 @@ static inline void compound_unlock_irqrestore(struct page *page,
#endif
}
-static inline struct page *compound_head_by_tail(struct page *tail)
-{
- struct page *head = tail->first_page;
-
- /*
- * page->first_page may be a dangling pointer to an old
- * compound page, so recheck that it is still a tail
- * page before returning.
- */
- smp_rmb();
- if (likely(PageTail(tail)))
- return head;
- return tail;
-}
-
-/*
- * Since either compound page could be dismantled asynchronously in THP
- * or we access asynchronously arbitrary positioned struct page, there
- * would be tail flag race. To handle this race, we should call
- * smp_rmb() before checking tail flag. compound_head_by_tail() did it.
- */
-static inline struct page *compound_head(struct page *page)
-{
- if (unlikely(PageTail(page)))
- return compound_head_by_tail(page);
- return page;
-}
-
-/*
- * If we access compound page synchronously such as access to
- * allocated page, there is no need to handle tail flag race, so we can
- * check tail flag directly without any synchronization primitive.
- */
-static inline struct page *compound_head_fast(struct page *page)
-{
- if (unlikely(PageTail(page)))
- return page->first_page;
- return page;
-}
-
/*
* The atomic page->_mapcount, starts from -1: so that transitions
* both from it and to it can be tracked, using atomic_inc_and_test
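compound_head() is still used in the hunks below (get_huge_page_tail() and virt_to_head_page()), so once the open-coded helpers above are gone it has to come from elsewhere; in mainline it moves to include/linux/page-flags.h and reads a bit-encoded head link instead of page->first_page. A minimal sketch of that scheme, for orientation only (page->compound_head and the READ_ONCE() usage follow the mainline implementation and are not part of this diff):

/*
 * Sketch: a tail page stores the head page pointer with bit 0 set, so a
 * single READ_ONCE() both answers "is this a tail page?" and yields the
 * head, removing the PageTail()/smp_rmb() race the deleted comment handled.
 */
static inline struct page *compound_head(struct page *page)
{
        unsigned long head = READ_ONCE(page->compound_head);

        if (unlikely(head & 1))
                return (struct page *)(head - 1);
        return page;
}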
@@ -518,7 +478,7 @@ static inline void get_huge_page_tail(struct page *page)
VM_BUG_ON_PAGE(!PageTail(page), page);
VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
- if (compound_tail_refcounted(page->first_page))
+ if (compound_tail_refcounted(compound_head(page)))
atomic_inc(&page->_mapcount);
}
@@ -541,13 +501,7 @@ static inline struct page *virt_to_head_page(const void *x)
{
struct page *page = virt_to_page(x);
- /*
- * We don't need to worry about synchronization of tail flag
- * when we call virt_to_head_page() since it is only called for
- * already allocated page and this page won't be freed until
- * this virt_to_head_page() is finished. So use _fast variant.
- */
- return compound_head_fast(page);
+ return compound_head(page);
}
/*
@@ -568,28 +522,42 @@ int split_free_page(struct page *page);
/*
* Compound pages have a destructor function. Provide a
* prototype for that function and accessor functions.
- * These are _only_ valid on the head of a PG_compound page.
+ * These are _only_ valid on the head of a compound page.
*/
+typedef void compound_page_dtor(struct page *);
+
+/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
+enum compound_dtor_id {
+ NULL_COMPOUND_DTOR,
+ COMPOUND_PAGE_DTOR,
+#ifdef CONFIG_HUGETLB_PAGE
+ HUGETLB_PAGE_DTOR,
+#endif
+ NR_COMPOUND_DTORS,
+};
+extern compound_page_dtor * const compound_page_dtors[];
static inline void set_compound_page_dtor(struct page *page,
- compound_page_dtor *dtor)
+ enum compound_dtor_id compound_dtor)
{
- page[1].compound_dtor = dtor;
+ VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
+ page[1].compound_dtor = compound_dtor;
}
static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
- return page[1].compound_dtor;
+ VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
+ return compound_page_dtors[page[1].compound_dtor];
}
-static inline int compound_order(struct page *page)
+static inline unsigned int compound_order(struct page *page)
{
if (!PageHead(page))
return 0;
return page[1].compound_order;
}
-static inline void set_compound_order(struct page *page, unsigned long order)
+static inline void set_compound_order(struct page *page, unsigned int order)
{
page[1].compound_order = order;
}
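The enum above replaces a raw destructor pointer in page[1] with a small index into a table kept in mm/page_alloc.c (the comment on the enum asks for the two to stay in sync). A sketch of that table and of the free-time lookup, assuming the mainline destructor names free_compound_page() and free_huge_page() (illustration only, not part of this diff):

/* Must stay in the same order as enum compound_dtor_id. */
static compound_page_dtor * const compound_page_dtors[] = {
        NULL,                   /* NULL_COMPOUND_DTOR */
        free_compound_page,     /* COMPOUND_PAGE_DTOR */
#ifdef CONFIG_HUGETLB_PAGE
        free_huge_page,         /* HUGETLB_PAGE_DTOR */
#endif
};

/* At free time the stored id is mapped back to a function and called. */
static void destroy_compound_page_sketch(struct page *page)
{
        compound_page_dtor *dtor = get_compound_page_dtor(page);

        (*dtor)(page);
}

Storing a small id rather than a pointer keeps page[1] compact and lets both accessors sanity-check the value with VM_BUG_ON_PAGE().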
@@ -1572,8 +1540,7 @@ static inline bool ptlock_init(struct page *page)
* with 0. Make sure nobody took it in use in between.
*
* It can happen if arch try to use slab for page table allocation:
- * slab code uses page->slab_cache and page->first_page (for tail
- * pages), which share storage with page->ptl.
+ * slab code uses page->slab_cache, which share storage with page->ptl.
*/
VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
if (!ptlock_alloc(page))
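For context, the storage sharing this comment refers to comes from the union in struct page (include/linux/mm_types.h), where the split page table lock overlays fields used by the slab allocators. A simplified sketch of the relevant members (layout reduced and renamed here, not part of this diff):

struct page_layout_sketch {
        /* ... */
        union {
                unsigned long private;
                spinlock_t ptl;                 /* split page table lock */
                struct kmem_cache *slab_cache;  /* SL[AU]B */
        };
        /* ... */
};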
@@ -1843,7 +1810,8 @@ extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern __printf(3, 4)
-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
+void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
+ const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
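Taken together, setting up a compound page with the reworked accessors looks roughly like the following (modeled on prep_compound_page() in mm/page_alloc.c; the _sketch name and the reduced body are illustrative, not part of this diff):

static void prep_compound_page_sketch(struct page *page, unsigned int order)
{
        /* Record the destructor as an enum id, not a function pointer. */
        set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
        /* Page orders are now consistently unsigned int. */
        set_compound_order(page, order);
        __SetPageHead(page);
}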