author     Mel Gorman <mel@csn.ul.ie>  2007-07-17 15:03:12 +0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-17 21:22:59 +0400
commit     2a1e274acf0b1c192face19a4be7c12d4503eaaf (patch)
tree       f7e98e1fe19d38bb10bf178fb8f8ed1789b659b2 /include/linux
parent     769848c03895b63e5662eb7e4ec8c4866f7d0183 (diff)
Create the ZONE_MOVABLE zone
The following 8 patches against 2.6.20-mm2 create a zone called ZONE_MOVABLE that is only usable by allocations that specify both __GFP_HIGHMEM and __GFP_MOVABLE. This has the effect of keeping all non-movable pages within a single memory partition while allowing movable allocations to be satisfied from either partition. The patches may be applied with the list-based anti-fragmentation patches that group pages together based on mobility.

The size of the zone is determined by a kernelcore= parameter specified at boot-time. This specifies how much memory is usable by non-movable allocations, and the remainder is used for ZONE_MOVABLE. Any range of pages within ZONE_MOVABLE can be released by migrating the pages or by reclaiming.

When selecting a zone to take pages from for ZONE_MOVABLE, there are two things to consider. First, only memory from the highest populated zone is used for ZONE_MOVABLE. On x86, this is probably going to be ZONE_HIGHMEM, but it would be ZONE_DMA on ppc64 or possibly ZONE_DMA32 on x86_64. Second, the amount of memory usable by the kernel will be spread evenly throughout NUMA nodes where possible. If the nodes are not of equal size, the amount of memory usable by the kernel on some nodes may be greater than on others.

By default, the zone is not as useful for hugetlb allocations because they are pinned and non-migratable (currently, at least). A sysctl is provided that allows huge pages to be allocated from that zone. This means that the huge page pool can be resized to the size of ZONE_MOVABLE during the lifetime of the system, assuming that pages are not mlocked. Despite huge pages being non-movable, we do not introduce additional external fragmentation of note, as huge pages are always the largest contiguous block we care about.

Credit goes to Andy Whitcroft for catching a large variety of problems during review of the patches.

This patch creates an additional zone, ZONE_MOVABLE. This zone is only usable by allocations which specify both __GFP_HIGHMEM and __GFP_MOVABLE. Hot-added memory continues to be placed in its existing destination, as there is no mechanism to redirect it to a specific zone.

[y-goto@jp.fujitsu.com: Fix section mismatch of memory hotplug related code]
[akpm@linux-foundation.org: various fixes]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
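The selection rule described above can be exercised outside the kernel. The sketch below is a stand-alone mock, not kernel code: the MOCK_* names, the flag bit values and the reduced zone list are made up for illustration, and only the rule itself, that ZONE_MOVABLE is chosen when and only when both __GFP_HIGHMEM and __GFP_MOVABLE are set, is taken from the patch (compare the gfp_zone() hunk below).

/*
 * Stand-alone sketch of the zone selection rule added by this patch.
 * MOCK_* names and values are illustrative; the real definitions live
 * in include/linux/gfp.h and include/linux/mmzone.h.
 */
#include <stdio.h>

#define MOCK_GFP_HIGHMEM 0x02u  /* illustrative bit, not the kernel's value */
#define MOCK_GFP_MOVABLE 0x08u  /* illustrative bit, not the kernel's value */

enum mock_zone { MOCK_ZONE_NORMAL, MOCK_ZONE_HIGHMEM, MOCK_ZONE_MOVABLE };

/* Mirrors the new branch in gfp_zone(): only the combination of both
 * flags selects ZONE_MOVABLE; __GFP_HIGHMEM alone still selects
 * ZONE_HIGHMEM, and __GFP_MOVABLE alone falls through to ZONE_NORMAL. */
static enum mock_zone mock_gfp_zone(unsigned int flags)
{
        if ((flags & (MOCK_GFP_HIGHMEM | MOCK_GFP_MOVABLE)) ==
                        (MOCK_GFP_HIGHMEM | MOCK_GFP_MOVABLE))
                return MOCK_ZONE_MOVABLE;
        if (flags & MOCK_GFP_HIGHMEM)
                return MOCK_ZONE_HIGHMEM;
        return MOCK_ZONE_NORMAL;
}

int main(void)
{
        printf("HIGHMEM only      -> zone %d\n", mock_gfp_zone(MOCK_GFP_HIGHMEM));
        printf("MOVABLE only      -> zone %d\n", mock_gfp_zone(MOCK_GFP_MOVABLE));
        printf("HIGHMEM | MOVABLE -> zone %d\n",
               mock_gfp_zone(MOCK_GFP_HIGHMEM | MOCK_GFP_MOVABLE));
        return 0;
}

In the kernel itself an allocation takes the new branch simply by passing both flags in its gfp_mask, and booting with, for example, kernelcore=512M (the amount here is only an example) asks that roughly that much memory, spread across NUMA nodes where possible, remain usable by non-movable allocations, with the rest of the highest populated zone forming ZONE_MOVABLE.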
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/gfp.h    |  3
-rw-r--r--  include/linux/mm.h     |  1
-rw-r--r--  include/linux/mmzone.h | 20
-rw-r--r--  include/linux/vmstat.h |  5
4 files changed, 25 insertions, 4 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index e5882fe49f83..bc68dd9a6d41 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -106,6 +106,9 @@ static inline enum zone_type gfp_zone(gfp_t flags)
        if (flags & __GFP_DMA32)
                return ZONE_DMA32;
#endif
+       if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
+                       (__GFP_HIGHMEM | __GFP_MOVABLE))
+               return ZONE_MOVABLE;
#ifdef CONFIG_HIGHMEM
        if (flags & __GFP_HIGHMEM)
                return ZONE_HIGHMEM;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 97d0cddfd223..857e44817178 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1005,6 +1005,7 @@ extern unsigned long find_max_pfn_with_active_regions(void);
extern void free_bootmem_with_active_regions(int nid,
                                        unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);
+extern int cmdline_parse_kernelcore(char *p);
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
extern int early_pfn_to_nid(unsigned long pfn);
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 04b1636a970b..d71ff763c9df 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -146,6 +146,7 @@ enum zone_type {
         */
        ZONE_HIGHMEM,
#endif
+       ZONE_MOVABLE,
        MAX_NR_ZONES
};
@@ -167,6 +168,7 @@ enum zone_type {
        + defined(CONFIG_ZONE_DMA32)    \
        + 1                             \
        + defined(CONFIG_HIGHMEM)       \
+       + 1                             \
)
#if __ZONE_COUNT < 2
#define ZONES_SHIFT 0
@@ -499,10 +501,22 @@ static inline int populated_zone(struct zone *zone)
        return (!!zone->present_pages);
}
+extern int movable_zone;
+
+static inline int zone_movable_is_highmem(void)
+{
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+       return movable_zone == ZONE_HIGHMEM;
+#else
+       return 0;
+#endif
+}
+
static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
-       return (idx == ZONE_HIGHMEM);
+       return (idx == ZONE_HIGHMEM ||
+               (idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
        return 0;
#endif
@@ -522,7 +536,9 @@ static inline int is_normal_idx(enum zone_type idx)
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
-       return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
+       int zone_idx = zone - zone->zone_pgdat->node_zones;
+       return zone_idx == ZONE_HIGHMEM ||
+               (zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
#else
        return 0;
#endif
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d9325cf8a134..75370ec0923e 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -25,7 +25,7 @@
#define HIGHMEM_ZONE(xx)
#endif
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                FOR_ALL_ZONES(PGALLOC),
@@ -170,7 +170,8 @@ static inline unsigned long node_page_state(int node,
#ifdef CONFIG_HIGHMEM
                zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
-               zone_page_state(&zones[ZONE_NORMAL], item);
+               zone_page_state(&zones[ZONE_NORMAL], item) +
+               zone_page_state(&zones[ZONE_MOVABLE], item);
}
extern void zone_statistics(struct zonelist *, struct zone *);