path: root/include/linux/pagemap.h
author		Paul Jackson <pj@sgi.com>	2006-03-24 14:16:04 +0300
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-24 18:33:22 +0300
commit		44110fe385af23ca5eee8a6ad4ff55d50339097a (patch)
tree		50ed2bfe054b8e35968d8e4a5fbe95c8b3db843b /include/linux/pagemap.h
parent		825a46af5ac171f9f41f794a0a00165588ba1589 (diff)
download	linux-44110fe385af23ca5eee8a6ad4ff55d50339097a.tar.xz
[PATCH] cpuset memory spread page cache implementation and hooks
Change the page cache allocation calls to support cpuset memory spreading.

See the previous patch, cpuset_mem_spread, for an explanation of cpuset memory spreading.

On systems without cpusets configured in the kernel, this is no change.

On systems with cpusets configured in the kernel, but the "memory_spread" cpuset option not enabled for the current task's cpuset, this adds a call to a cpuset routine and a failed bit test of the processor state flag PF_SPREAD_PAGE.

On tasks in cpusets with "memory_spread" enabled, this adds a call to a cpuset routine that computes which of the task's mems_allowed nodes should be preferred for this allocation.

If memory spreading applies to a particular allocation, then any other NUMA mempolicy does not apply.

Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
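The pagemap.h hunk below only declares the NUMA variants; the out-of-line definitions that actually make the spreading decision live in mm/filemap.c and are not shown in this diff. As a minimal sketch of that logic, assuming the cpuset_do_page_mem_spread() and cpuset_mem_spread_node() helpers introduced by the previous cpuset_mem_spread patch (the exact code in mm/filemap.c may differ), the NUMA page cache allocators could look roughly like this:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/cpuset.h>
#include <linux/module.h>

#ifdef CONFIG_NUMA
/*
 * Sketch only: if the task's cpuset has page spreading enabled
 * (PF_SPREAD_PAGE set), allocate on the cpuset-computed spread node
 * instead of following the usual NUMA mempolicy.
 */
struct page *page_cache_alloc(struct address_space *x)
{
	if (cpuset_do_page_mem_spread()) {
		int n = cpuset_mem_spread_node();	/* node chosen from mems_allowed */
		return alloc_pages_node(n, mapping_gfp_mask(x), 0);
	}
	return alloc_pages(mapping_gfp_mask(x), 0);
}
EXPORT_SYMBOL(page_cache_alloc);

struct page *page_cache_alloc_cold(struct address_space *x)
{
	if (cpuset_do_page_mem_spread()) {
		int n = cpuset_mem_spread_node();
		return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0);
	}
	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}
EXPORT_SYMBOL(page_cache_alloc_cold);
#endif

For !CONFIG_NUMA kernels the static inline versions in pagemap.h remain in place, so non-NUMA builds pick up no new code at all.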
Diffstat (limited to 'include/linux/pagemap.h')
-rw-r--r--	include/linux/pagemap.h	5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ee700c6eb442..839f0b3c23aa 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -51,6 +51,10 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 #define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, int cold);
 
+#ifdef CONFIG_NUMA
+extern struct page *page_cache_alloc(struct address_space *x);
+extern struct page *page_cache_alloc_cold(struct address_space *x);
+#else
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
 	return alloc_pages(mapping_gfp_mask(x), 0);
@@ -60,6 +64,7 @@ static inline struct page *page_cache_alloc_cold(struct address_space *x)
 {
 	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
 }
+#endif
 
 typedef int filler_t(void *, struct page *);