Diffstat (limited to 'include/linux/mmzone.h'):

 include/linux/mmzone.h | 30 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2427706f78b4..2daa54f55db7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -188,7 +188,7 @@ static inline int is_unevictable_lru(enum lru_list lru)
struct zone_reclaim_stat {
/*
* The pageout code in vmscan.c keeps track of how many of the
- * mem/swap backed and file backed pages are refeferenced.
+ * mem/swap backed and file backed pages are referenced.
* The higher the rotated/scanned ratio, the more valuable
* that cache is.
*
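
As background for the comment above: these rotated/scanned ratios feed the anon-vs-file balancing in vmscan.c's get_scan_count(). A loose sketch of that weighting, with names simplified and the surrounding arithmetic condensed (illustrative, not the verbatim kernel code):

/* A high rotated/scanned ratio marks a valuable cache, so its weight
 * drops and reclaim pressure shifts to the other list.
 * Index 0 is anon, index 1 is file. */
anon_weight = anon_prio * (rs->recent_scanned[0] + 1);
anon_weight /= rs->recent_rotated[0] + 1;
file_weight = file_prio * (rs->recent_scanned[1] + 1);
file_weight /= rs->recent_rotated[1] + 1;
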
@@ -201,7 +201,7 @@ struct zone_reclaim_stat {
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
struct zone *zone;
#endif
};
@@ -209,7 +209,6 @@ struct lruvec {
/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
-#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
/* Isolate clean file */
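
LRU_ALL_EVICTABLE goes away because it no longer has any users; the remaining masks are consumed by the per-memcg statistics code. A minimal sketch of how such a mask selects lists, modelled on the nr_lru_pages helpers in memcontrol.c (the counting helper here is hypothetical):

unsigned long nr = 0;
enum lru_list lru;

for_each_lru(lru) {
	if (!(BIT(lru) & LRU_ALL_FILE))	/* or any other LRU_ALL_* mask */
		continue;
	nr += pages_on_lru(lru);	/* hypothetical per-list counter */
}
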
@@ -369,6 +368,10 @@ struct zone {
*/
spinlock_t lock;
int all_unreclaimable; /* All pages pinned */
+#if defined CONFIG_COMPACTION || defined CONFIG_CMA
+ /* pfn where the last incremental compaction isolated free pages */
+ unsigned long compact_cached_free_pfn;
+#endif
#ifdef CONFIG_MEMORY_HOTPLUG
/* see spanned/present_pages for more description */
seqlock_t span_seqlock;
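
The new compact_cached_free_pfn lets repeated order > 0 compaction runs resume the free-page scanner where the previous pass left off, instead of rescanning from the end of the zone every time. A hedged sketch of the idea, simplified from the mm/compaction.c usage (the helper name and bounds handling are illustrative):

/* Resume the free scanner from the cached pfn; restart from the
 * zone's last pfn when the cached value falls outside the zone. */
static unsigned long free_scan_start(struct zone *zone)
{
	unsigned long pfn = zone->compact_cached_free_pfn;
	unsigned long end = zone->zone_start_pfn + zone->spanned_pages;

	if (pfn < zone->zone_start_pfn || pfn >= end)
		pfn = end - 1;
	return pfn;
}
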
@@ -475,6 +478,14 @@ struct zone {
* rarely used fields:
*/
const char *name;
+#ifdef CONFIG_MEMORY_ISOLATION
+ /*
+ * the number of MIGRATE_ISOLATE *pageblock*.
+ * We need this for free page counting. Look at zone_watermark_ok_safe.
+ * It's protected by zone->lock
+ */
+ int nr_pageblock_isolate;
+#endif
} ____cacheline_internodealigned_in_smp;
typedef enum {
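
nr_pageblock_isolate exists because pages in MIGRATE_ISOLATE pageblocks still sit on the free lists and are counted in NR_FREE_PAGES, yet cannot be allocated; without discounting them, kswapd can loop forever chasing a watermark it can never reach. Roughly what zone_watermark_ok_safe() does with the counter (a sketch, not the exact code):

/* Discount unallocatable isolated pages before the usual check;
 * zone->lock protects nr_pageblock_isolate, per the comment above. */
unsigned long free = zone_page_state(z, NR_FREE_PAGES);

free -= z->nr_pageblock_isolate << pageblock_order;
return __zone_watermark_ok(z, order, mark, classzone_idx,
			   alloc_flags, free);
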
@@ -671,7 +682,7 @@ typedef struct pglist_data {
int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
struct page *node_mem_map;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
struct page_cgroup *node_page_cgroup;
#endif
#endif
@@ -694,7 +705,8 @@ typedef struct pglist_data {
range, including holes */
int node_id;
wait_queue_head_t kswapd_wait;
- struct task_struct *kswapd;
+ wait_queue_head_t pfmemalloc_wait;
+ struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
int kswapd_max_order;
enum zone_type classzone_idx;
} pg_data_t;
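
pfmemalloc_wait is the throttling point added by the swap-over-network work: when a node runs short on memory, direct reclaimers sleep on this queue until kswapd has made progress, rather than piling into direct reclaim. A hedged sketch of both sides, condensed from the vmscan.c usage (pfmemalloc_watermark_ok() is the predicate used there, but the surrounding logic is abridged):

/* Direct-reclaim side: block until kswapd signals recovery. */
wait_event_killable(pgdat->pfmemalloc_wait,
		    pfmemalloc_watermark_ok(pgdat));

/* kswapd side: wake throttled tasks once watermarks look sane. */
if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
    pfmemalloc_watermark_ok(pgdat))
	wake_up(&pgdat->pfmemalloc_wait);
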
@@ -718,7 +730,7 @@ typedef struct pglist_data {
#include <linux/memory_hotplug.h>
extern struct mutex zonelists_mutex;
-void build_all_zonelists(void *data);
+void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags);
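
build_all_zonelists() grows parameters so memory hotplug can rebuild zonelists with the pgdat or zone that is coming online taken into account; boot-time callers simply pass NULL. Assumed call patterns, sketched from memory of the hotplug paths (worth verifying against the actual callers):

build_all_zonelists(NULL, NULL);	/* boot and sysctl-triggered rebuilds */
build_all_zonelists(pgdat, NULL);	/* hot-adding a brand-new node */
build_all_zonelists(NULL, zone);	/* onlining pages into a zone */
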
@@ -736,7 +748,7 @@ extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
static inline struct zone *lruvec_zone(struct lruvec *lruvec)
{
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
return lruvec->zone;
#else
return container_of(lruvec, struct zone, lruvec);
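
The #ifdef here mirrors the one in struct lruvec above. Without memcg there is exactly one lruvec per zone, embedded in struct zone, so container_of() recovers the zone positionally; with CONFIG_MEMCG every memory cgroup owns its own per-zone lruvecs, which live outside struct zone, so an explicit back-pointer (filled in by lruvec_init()) is the only option. Roughly the memcg-side embedding, abridged:

/* Sketch of the per-memcg container (see memcontrol.c): this lruvec
 * is not inside struct zone, so container_of() cannot find the zone. */
struct mem_cgroup_per_zone {
	struct lruvec lruvec;		/* ->zone set by lruvec_init() */
	/* ... per-memcg reclaim state elided ... */
};
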
@@ -773,7 +785,7 @@ extern int movable_zone;
static inline int zone_movable_is_highmem(void)
{
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE)
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
return movable_zone == ZONE_HIGHMEM;
#else
return 0;
@@ -1052,7 +1064,7 @@ struct mem_section {
/* See declaration of similar field in struct zone */
unsigned long *pageblock_flags;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#ifdef CONFIG_MEMCG
/*
* If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
* section. (see memcontrol.h/page_cgroup.h about this.)
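
This hook and the !SPARSEMEM one earlier (pgdat->node_page_cgroup) serve the same lookup: every page needs a struct page_cgroup, and lookup_page_cgroup() finds it via the pgdat under FLATMEM or via the mem_section under SPARSEMEM. A condensed sketch of the SPARSEMEM-side lookup, abridged from mm/page_cgroup.c (error handling dropped):

struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	struct mem_section *section = __pfn_to_section(pfn);

	/* section->page_cgroup is biased so that indexing by the raw
	 * pfn lands on this page's entry. */
	return section->page_cgroup + pfn;
}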