Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h | 57
1 file changed, 42 insertions(+), 15 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 230180c3eb61..656b588a9f96 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -14,6 +14,7 @@
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <asm/atomic.h>
+#include <asm/page.h>
/* Free memory management - zoned buddy allocator. */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
@@ -45,6 +46,27 @@ struct zone_padding {
#define ZONE_PADDING(name)
#endif
+enum zone_stat_item {
+ NR_ANON_PAGES, /* Mapped anonymous pages */
+ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
+ only modified from process context */
+ NR_FILE_PAGES,
+ NR_SLAB, /* Pages used by slab allocator */
+ NR_PAGETABLE, /* used for pagetables */
+ NR_FILE_DIRTY,
+ NR_WRITEBACK,
+ NR_UNSTABLE_NFS, /* NFS unstable pages */
+ NR_BOUNCE,
+#ifdef CONFIG_NUMA
+ NUMA_HIT, /* allocated in intended node */
+ NUMA_MISS, /* allocated in non intended node */
+ NUMA_FOREIGN, /* was intended here, hit elsewhere */
+ NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */
+ NUMA_LOCAL, /* allocation from local node */
+ NUMA_OTHER, /* allocation from other node */
+#endif
+ NR_VM_ZONE_STAT_ITEMS };
+
struct per_cpu_pages {
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
@@ -54,13 +76,8 @@ struct per_cpu_pages {
struct per_cpu_pageset {
struct per_cpu_pages pcp[2]; /* 0: hot. 1: cold */
-#ifdef CONFIG_NUMA
- unsigned long numa_hit; /* allocated in intended node */
- unsigned long numa_miss; /* allocated in non intended node */
- unsigned long numa_foreign; /* was intended here, hit elsewhere */
- unsigned long interleave_hit; /* interleaver prefered this zone */
- unsigned long local_node; /* allocation from local node */
- unsigned long other_node; /* allocation from other node */
+#ifdef CONFIG_SMP
+ s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;
@@ -133,6 +150,10 @@ struct zone {
unsigned long lowmem_reserve[MAX_NR_ZONES];
#ifdef CONFIG_NUMA
+ /*
+ * zone reclaim becomes active if more unmapped pages exist.
+ */
+ unsigned long min_unmapped_ratio;
struct per_cpu_pageset *pageset[NR_CPUS];
#else
struct per_cpu_pageset pageset[NR_CPUS];
@@ -164,12 +185,8 @@ struct zone {
/* A count of how many reclaimers are scanning this zone */
atomic_t reclaim_in_progress;
- /*
- * timestamp (in jiffies) of the last zone reclaim that did not
- * result in freeing of pages. This is used to avoid repeated scans
- * if all memory in the zone is in use.
- */
- unsigned long last_unsuccessful_zone_reclaim;
+ /* Zone statistics */
+ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
/*
* prev_priority holds the scanning priority for this zone. It is
@@ -196,7 +213,7 @@ struct zone {
/*
* wait_table -- the array holding the hash table
- * wait_table_size -- the size of the hash table array
+ * wait_table_hash_nr_entries -- the size of the hash table array
* wait_table_bits -- wait_table_size == (1 << wait_table_bits)
*
* The purpose of all these is to keep track of the people
@@ -219,7 +236,7 @@ struct zone {
* free_area_init_core() performs the initialization of them.
*/
wait_queue_head_t * wait_table;
- unsigned long wait_table_size;
+ unsigned long wait_table_hash_nr_entries;
unsigned long wait_table_bits;
/*
@@ -332,6 +349,9 @@ void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags);
+extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
+ unsigned long size);
+
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
@@ -398,6 +418,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
void __user *, size_t *, loff_t *);
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
+ struct file *, void __user *, size_t *, loff_t *);
#include <linux/topology.h>
/* Returns the number of the current Node. */
@@ -505,6 +527,10 @@ struct mem_section {
* pages. However, it is stored with some other magic.
* (see sparse.c::sparse_init_one_section())
*
+ * Additionally during early boot we encode node id of
+ * the location of the section here to guide allocation.
+ * (see sparse.c::memory_present())
+ *
* Making it a UL at least makes someone do a cast
* before using it wrong.
*/
@@ -544,6 +570,7 @@ extern int __section_nr(struct mem_section* ms);
#define SECTION_HAS_MEM_MAP (1UL<<1)
#define SECTION_MAP_LAST_BIT (1UL<<2)
#define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1))
+#define SECTION_NID_SHIFT 2
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{