Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c        3
-rw-r--r--  mm/filemap_xip.c    1
-rw-r--r--  mm/madvise.c        1
-rw-r--r--  mm/memory.c         2
-rw-r--r--  mm/mlock.c         11
-rw-r--r--  mm/msync.c          1
-rw-r--r--  mm/page_alloc.c    49
-rw-r--r--  mm/rmap.c          66
-rw-r--r--  mm/shmem.c          8
-rw-r--r--  mm/slab.c          59
-rw-r--r--  mm/slob.c          53
-rw-r--r--  mm/slub.c         238
-rw-r--r--  mm/sparse.c         2
-rw-r--r--  mm/vmalloc.c        2
-rw-r--r--  mm/vmstat.c         1
15 files changed, 283 insertions, 214 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 7b48b2ad00e7..edb1b0b5cc8d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -670,7 +670,8 @@ repeat:
page = find_lock_page(mapping, index);
if (!page) {
if (!cached_page) {
- cached_page = alloc_page(gfp_mask);
+ cached_page =
+ __page_cache_alloc(gfp_mask);
if (!cached_page)
return NULL;
}
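
The switch away from a bare alloc_page() presumably lets this path pick up the page-cache allocation policy (such as cpuset memory spreading on NUMA). A sketch of the likely shape of the helper, shown for context only and not part of this diff:

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
	/* Spread page-cache pages over the cpuset's memory nodes if asked to. */
	if (cpuset_do_page_mem_spread()) {
		int n = cpuset_mem_spread_node();
		return alloc_pages_node(n, gfp, 0);
	}
	return alloc_pages(gfp, 0);
}
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif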
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 1b49dab9b25d..fa360e566d88 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/uio.h>
#include <linux/rmap.h>
+#include <linux/sched.h>
#include <asm/tlbflush.h>
#include "filemap.h"
diff --git a/mm/madvise.c b/mm/madvise.c
index e75096b5a6d3..60542d006ec1 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -10,6 +10,7 @@
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/hugetlb.h>
+#include <linux/sched.h>
/*
* Any behaviour which results in changes to the vma->vm_flags needs to
diff --git a/mm/memory.c b/mm/memory.c
index 1d647ab0ee72..cb94488ab96d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
page = vm_normal_page(vma, addr, pte);
if (page) {
get_page(page);
- page_dup_rmap(page);
+ page_dup_rmap(page, vma, addr);
rss[!!PageAnon(page)]++;
}
diff --git a/mm/mlock.c b/mm/mlock.c
index 3446b7ef731e..4d3fea267e0d 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -10,7 +10,18 @@
#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+int can_do_mlock(void)
+{
+ if (capable(CAP_IPC_LOCK))
+ return 1;
+ if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(can_do_mlock);
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
unsigned long start, unsigned long end, unsigned int newflags)
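
can_do_mlock() is exported so that code outside mm/ can apply the same policy the mlock() syscall applies before pinning user memory. A hypothetical caller, with illustrative names that are not from this diff:

static int pin_user_region(unsigned long start, size_t len)
{
	/* Same test mlock() makes: CAP_IPC_LOCK or a non-zero RLIMIT_MEMLOCK. */
	if (!can_do_mlock())
		return -EPERM;

	/* ... get_user_pages() and locked_vm accounting would follow ... */
	return 0;
}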
diff --git a/mm/msync.c b/mm/msync.c
index 358d73cf7b78..144a7570535d 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -12,6 +12,7 @@
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/syscalls.h>
+#include <linux/sched.h>
/*
* MS_SYNC syncs the entire file - including mappings.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ae96dd844432..d8970623c566 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -136,6 +136,11 @@ static unsigned long __meminitdata dma_reserve;
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+#if MAX_NUMNODES > 1
+int nr_node_ids __read_mostly = MAX_NUMNODES;
+EXPORT_SYMBOL(nr_node_ids);
+#endif
+
#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
@@ -669,26 +674,6 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
return i;
}
-#if MAX_NUMNODES > 1
-int nr_node_ids __read_mostly = MAX_NUMNODES;
-EXPORT_SYMBOL(nr_node_ids);
-
-/*
- * Figure out the number of possible node ids.
- */
-static void __init setup_nr_node_ids(void)
-{
- unsigned int node;
- unsigned int highest = 0;
-
- for_each_node_mask(node, node_possible_map)
- highest = node;
- nr_node_ids = highest + 1;
-}
-#else
-static void __init setup_nr_node_ids(void) {}
-#endif
-
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -2165,7 +2150,7 @@ void __init setup_per_cpu_pageset(void)
#endif
-static __meminit noinline
+static noinline __init_refok
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
int i;
@@ -2678,7 +2663,7 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
}
}
-static void __meminit alloc_node_mem_map(struct pglist_data *pgdat)
+static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
{
/* Skip empty nodes */
if (!pgdat->node_spanned_pages)
@@ -2733,6 +2718,26 @@ void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
}
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+
+#if MAX_NUMNODES > 1
+/*
+ * Figure out the number of possible node ids.
+ */
+static void __init setup_nr_node_ids(void)
+{
+ unsigned int node;
+ unsigned int highest = 0;
+
+ for_each_node_mask(node, node_possible_map)
+ highest = node;
+ nr_node_ids = highest + 1;
+}
+#else
+static inline void setup_nr_node_ids(void)
+{
+}
+#endif
+
/**
* add_active_range - Register a range of PFNs backed by physical memory
* @nid: The node ID the range resides on
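
nr_node_ids, exported above, is the highest possible node id plus one; with possible nodes {0, 2, 5} it ends up as 6. That lets other code size per-node arrays by the actual possible-node count instead of MAX_NUMNODES. A hypothetical consumer, with illustrative identifiers:

static int foo_init(void)
{
	/* One slot per possible node id rather than MAX_NUMNODES entries. */
	struct foo_node **nodes;

	nodes = kcalloc(nr_node_ids, sizeof(*nodes), GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;
	/* ... */
	return 0;
}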
diff --git a/mm/rmap.c b/mm/rmap.c
index 304f51985c78..850165d32b7a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -162,12 +162,10 @@ void anon_vma_unlink(struct vm_area_struct *vma)
static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
unsigned long flags)
{
- if (flags & SLAB_CTOR_CONSTRUCTOR) {
- struct anon_vma *anon_vma = data;
+ struct anon_vma *anon_vma = data;
- spin_lock_init(&anon_vma->lock);
- INIT_LIST_HEAD(&anon_vma->head);
- }
+ spin_lock_init(&anon_vma->lock);
+ INIT_LIST_HEAD(&anon_vma->head);
}
void __init anon_vma_init(void)
@@ -532,19 +530,51 @@ static void __page_set_anon_rmap(struct page *page,
}
/**
+ * __page_check_anon_rmap - sanity check anonymous rmap addition
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+ /*
+ * The page's anon-rmap details (mapping and index) are guaranteed to
+ * be set up correctly at this point.
+ *
+ * We have exclusion against page_add_anon_rmap because the caller
+ * always holds the page locked, except if called from page_dup_rmap,
+ * in which case the page is already known to be setup.
+ *
+ * We have exclusion against page_add_new_anon_rmap because those pages
+ * are initially only visible via the pagetables, and the pte is locked
+ * over the call to page_add_new_anon_rmap.
+ */
+ struct anon_vma *anon_vma = vma->anon_vma;
+ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+ BUG_ON(page->mapping != (struct address_space *)anon_vma);
+ BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
+/**
* page_add_anon_rmap - add pte mapping to an anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
*/
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
+ VM_BUG_ON(!PageLocked(page));
+ VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
if (atomic_inc_and_test(&page->_mapcount))
__page_set_anon_rmap(page, vma, address);
- /* else checking page index and mapping is racy */
+ else
+ __page_check_anon_rmap(page, vma, address);
}
/*
@@ -555,10 +585,12 @@ void page_add_anon_rmap(struct page *page,
*
* Same as page_add_anon_rmap but must only be called on *new* pages.
* This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
*/
void page_add_new_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
+ BUG_ON(address < vma->vm_start || address >= vma->vm_end);
atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
__page_set_anon_rmap(page, vma, address);
}
@@ -575,6 +607,26 @@ void page_add_file_rmap(struct page *page)
__inc_zone_page_state(page, NR_FILE_MAPPED);
}
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page: the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+ BUG_ON(page_mapcount(page) == 0);
+ if (PageAnon(page))
+ __page_check_anon_rmap(page, vma, address);
+ atomic_inc(&page->_mapcount);
+}
+#endif
+
/**
* page_remove_rmap - take down pte mapping from a page
* @page: page to remove mapping from
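
The checking page_dup_rmap() above is built only under CONFIG_DEBUG_VM; presumably the header keeps the old behaviour as an inline for non-debug builds, along these lines (a sketch, an assumption about include/linux/rmap.h rather than part of this diff):

static inline void page_dup_rmap(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
	/* Non-debug build: just take the extra mapcount reference. */
	atomic_inc(&page->_mapcount);
}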
diff --git a/mm/shmem.c b/mm/shmem.c
index f01e8deed645..e537317bec4d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2358,13 +2358,11 @@ static void init_once(void *foo, struct kmem_cache *cachep,
{
struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
- if (flags & SLAB_CTOR_CONSTRUCTOR) {
- inode_init_once(&p->vfs_inode);
+ inode_init_once(&p->vfs_inode);
#ifdef CONFIG_TMPFS_POSIX_ACL
- p->i_acl = NULL;
- p->i_default_acl = NULL;
+ p->i_acl = NULL;
+ p->i_default_acl = NULL;
#endif
- }
}
static int init_inodecache(void)
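
With SLAB_CTOR_CONSTRUCTOR gone, a constructor now runs unconditionally whenever the allocator initializes an object, so the flag test is dead weight. The resulting pattern for a typical init-once constructor, using a hypothetical cache for illustration:

static void my_init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
	struct my_inode_info *p = foo;

	/* No flag check needed any more; this is only ever called to construct. */
	inode_init_once(&p->vfs_inode);
}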
diff --git a/mm/slab.c b/mm/slab.c
index 944b20581f8c..2e71a328aa09 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -409,9 +409,6 @@ struct kmem_cache {
/* constructor func */
void (*ctor) (void *, struct kmem_cache *, unsigned long);
- /* de-constructor func */
- void (*dtor) (void *, struct kmem_cache *, unsigned long);
-
/* 5) cache creation/removal */
const char *name;
struct list_head next;
@@ -572,21 +569,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif
/*
- * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
- * order.
- */
-#if defined(CONFIG_LARGE_ALLOCS)
-#define MAX_OBJ_ORDER 13 /* up to 32Mb */
-#define MAX_GFP_ORDER 13 /* up to 32Mb */
-#elif defined(CONFIG_MMU)
-#define MAX_OBJ_ORDER 5 /* 32 pages */
-#define MAX_GFP_ORDER 5 /* 32 pages */
-#else
-#define MAX_OBJ_ORDER 8 /* up to 1Mb */
-#define MAX_GFP_ORDER 8 /* up to 1Mb */
-#endif
-
-/*
* Do not go above this order unless 0 objects fit into the slab.
*/
#define BREAK_GFP_ORDER_HI 1
@@ -792,6 +774,7 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
*/
BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
+ WARN_ON_ONCE(size == 0);
while (size > csizep->cs_size)
csizep++;
@@ -1911,20 +1894,11 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
slab_error(cachep, "end of a freed object "
"was overwritten");
}
- if (cachep->dtor && !(cachep->flags & SLAB_POISON))
- (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
}
}
#else
static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
{
- if (cachep->dtor) {
- int i;
- for (i = 0; i < cachep->num; i++) {
- void *objp = index_to_obj(cachep, slabp, i);
- (cachep->dtor) (objp, cachep, 0);
- }
- }
}
#endif
@@ -2013,7 +1987,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
size_t left_over = 0;
int gfporder;
- for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
+ for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
unsigned int num;
size_t remainder;
@@ -2063,7 +2037,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
return left_over;
}
-static int setup_cpu_cache(struct kmem_cache *cachep)
+static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
{
if (g_cpucache_up == FULL)
return enable_cpucache(cachep);
@@ -2124,7 +2098,7 @@ static int setup_cpu_cache(struct kmem_cache *cachep)
* @align: The required alignment for the objects.
* @flags: SLAB flags
* @ctor: A constructor for the objects.
- * @dtor: A destructor for the objects.
+ * @dtor: A destructor for the objects (not implemented anymore).
*
* Returns a ptr to the cache on success, NULL on failure.
* Cannot be called within a int, but can be interrupted.
@@ -2159,7 +2133,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* Sanity checks... these are all serious usage bugs.
*/
if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
- (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
+ size > KMALLOC_MAX_SIZE || dtor) {
printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
name);
BUG();
@@ -2213,9 +2187,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (flags & SLAB_DESTROY_BY_RCU)
BUG_ON(flags & SLAB_POISON);
#endif
- if (flags & SLAB_DESTROY_BY_RCU)
- BUG_ON(dtor);
-
/*
* Always checks flags, a caller might be expecting debug support which
* isn't available.
@@ -2370,7 +2341,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
BUG_ON(!cachep->slabp_cache);
}
cachep->ctor = ctor;
- cachep->dtor = dtor;
cachep->name = name;
if (setup_cpu_cache(cachep)) {
@@ -2625,7 +2595,7 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
}
static void cache_init_objs(struct kmem_cache *cachep,
- struct slab *slabp, unsigned long ctor_flags)
+ struct slab *slabp)
{
int i;
@@ -2649,7 +2619,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
*/
if (cachep->ctor && !(cachep->flags & SLAB_POISON))
cachep->ctor(objp + obj_offset(cachep), cachep,
- ctor_flags);
+ 0);
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
@@ -2665,7 +2635,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
cachep->buffer_size / PAGE_SIZE, 0);
#else
if (cachep->ctor)
- cachep->ctor(objp, cachep, ctor_flags);
+ cachep->ctor(objp, cachep, 0);
#endif
slab_bufctl(slabp)[i] = i + 1;
}
@@ -2754,7 +2724,6 @@ static int cache_grow(struct kmem_cache *cachep,
struct slab *slabp;
size_t offset;
gfp_t local_flags;
- unsigned long ctor_flags;
struct kmem_list3 *l3;
/*
@@ -2763,7 +2732,6 @@ static int cache_grow(struct kmem_cache *cachep,
*/
BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
- ctor_flags = SLAB_CTOR_CONSTRUCTOR;
local_flags = (flags & GFP_LEVEL_MASK);
/* Take the l3 list lock to change the colour_next on this node */
check_irq_off();
@@ -2808,7 +2776,7 @@ static int cache_grow(struct kmem_cache *cachep,
slabp->nodeid = nodeid;
slab_map_pages(cachep, slabp, objp);
- cache_init_objs(cachep, slabp, ctor_flags);
+ cache_init_objs(cachep, slabp);
if (local_flags & __GFP_WAIT)
local_irq_disable();
@@ -2835,7 +2803,6 @@ failed:
* Perform extra freeing checks:
* - detect bad pointers.
* - POISON/RED_ZONE checking
- * - destructor calls, for caches with POISON+dtor
*/
static void kfree_debugcheck(const void *objp)
{
@@ -2894,12 +2861,6 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
BUG_ON(objnr >= cachep->num);
BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
- if (cachep->flags & SLAB_POISON && cachep->dtor) {
- /* we want to cache poison the object,
- * call the destruction callback
- */
- cachep->dtor(objp + obj_offset(cachep), cachep, 0);
- }
#ifdef CONFIG_DEBUG_SLAB_LEAK
slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
#endif
@@ -3099,7 +3060,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
#endif
objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON)
- cachep->ctor(objp, cachep, SLAB_CTOR_CONSTRUCTOR);
+ cachep->ctor(objp, cachep, 0);
#if ARCH_SLAB_MINALIGN
if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
diff --git a/mm/slob.c b/mm/slob.c
index c6933bc19bcd..71976c5d40d3 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
+#include <linux/rcupdate.h>
struct slob_block {
int units;
@@ -53,6 +54,16 @@ struct bigblock {
};
typedef struct bigblock bigblock_t;
+/*
+ * struct slob_rcu is inserted at the tail of allocated slob blocks, which
+ * were created with a SLAB_DESTROY_BY_RCU slab. slob_rcu is used to free
+ * the block using call_rcu.
+ */
+struct slob_rcu {
+ struct rcu_head head;
+ int size;
+};
+
static slob_t arena = { .next = &arena, .units = 1 };
static slob_t *slobfree = &arena;
static bigblock_t *bigblocks;
@@ -266,9 +277,9 @@ size_t ksize(const void *block)
struct kmem_cache {
unsigned int size, align;
+ unsigned long flags;
const char *name;
void (*ctor)(void *, struct kmem_cache *, unsigned long);
- void (*dtor)(void *, struct kmem_cache *, unsigned long);
};
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
@@ -283,8 +294,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
if (c) {
c->name = name;
c->size = size;
+ if (flags & SLAB_DESTROY_BY_RCU) {
+ /* leave room for rcu footer at the end of object */
+ c->size += sizeof(struct slob_rcu);
+ }
+ c->flags = flags;
c->ctor = ctor;
- c->dtor = dtor;
/* ignore alignment unless it's forced */
c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
if (c->align < align)
@@ -312,7 +327,7 @@ void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags)
b = (void *)__get_free_pages(flags, get_order(c->size));
if (c->ctor)
- c->ctor(b, c, SLAB_CTOR_CONSTRUCTOR);
+ c->ctor(b, c, 0);
return b;
}
@@ -328,15 +343,33 @@ void *kmem_cache_zalloc(struct kmem_cache *c, gfp_t flags)
}
EXPORT_SYMBOL(kmem_cache_zalloc);
-void kmem_cache_free(struct kmem_cache *c, void *b)
+static void __kmem_cache_free(void *b, int size)
{
- if (c->dtor)
- c->dtor(b, c, 0);
-
- if (c->size < PAGE_SIZE)
- slob_free(b, c->size);
+ if (size < PAGE_SIZE)
+ slob_free(b, size);
else
- free_pages((unsigned long)b, get_order(c->size));
+ free_pages((unsigned long)b, get_order(size));
+}
+
+static void kmem_rcu_free(struct rcu_head *head)
+{
+ struct slob_rcu *slob_rcu = (struct slob_rcu *)head;
+ void *b = (void *)slob_rcu - (slob_rcu->size - sizeof(struct slob_rcu));
+
+ __kmem_cache_free(b, slob_rcu->size);
+}
+
+void kmem_cache_free(struct kmem_cache *c, void *b)
+{
+ if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
+ struct slob_rcu *slob_rcu;
+ slob_rcu = b + (c->size - sizeof(struct slob_rcu));
+ INIT_RCU_HEAD(&slob_rcu->head);
+ slob_rcu->size = c->size;
+ call_rcu(&slob_rcu->head, kmem_rcu_free);
+ } else {
+ __kmem_cache_free(b, c->size);
+ }
}
EXPORT_SYMBOL(kmem_cache_free);
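
With this change SLOB honours SLAB_DESTROY_BY_RCU like the other allocators: the memory backing a freed object is only handed back to the allocator after an RCU grace period, so lockless readers may still touch it but must revalidate that it is the object they expect. A hypothetical cache set up for that pattern, with illustrative names:

static struct kmem_cache *conn_cachep;

static int __init conn_cache_init(void)
{
	/* Objects freed from this cache are returned to SLOB via call_rcu(). */
	conn_cachep = kmem_cache_create("conn_cache", sizeof(struct conn),
					0, SLAB_DESTROY_BY_RCU, NULL, NULL);
	return conn_cachep ? 0 : -ENOMEM;
}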
diff --git a/mm/slub.c b/mm/slub.c
index b39c8a69a4ff..3e5aefcb4075 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -78,10 +78,18 @@
*
* Overloading of page flags that are otherwise used for LRU management.
*
- * PageActive The slab is used as a cpu cache. Allocations
- * may be performed from the slab. The slab is not
- * on any slab list and cannot be moved onto one.
- * The cpu slab may be equipped with an additioanl
+ * PageActive The slab is frozen and exempt from list processing.
+ * This means that the slab is dedicated to a purpose
+ * such as satisfying allocations for a specific
+ * processor. Objects may be freed in the slab while
+ * it is frozen but slab_free will then skip the usual
+ * list operations. It is up to the processor holding
+ * the slab to integrate the slab into the slab lists
+ * when the slab is no longer needed.
+ *
+ * One use of this flag is to mark slabs that are
+ * used for allocations. Then such a slab becomes a cpu
+ * slab. The cpu slab may be equipped with an additional
* lockless_freelist that allows lockless access to
* free objects in addition to the regular freelist
* that requires the slab lock.
@@ -91,27 +99,42 @@
* the fast path and disables lockless freelists.
*/
-static inline int SlabDebug(struct page *page)
-{
+#define FROZEN (1 << PG_active)
+
#ifdef CONFIG_SLUB_DEBUG
- return PageError(page);
+#define SLABDEBUG (1 << PG_error)
#else
- return 0;
+#define SLABDEBUG 0
#endif
+
+static inline int SlabFrozen(struct page *page)
+{
+ return page->flags & FROZEN;
+}
+
+static inline void SetSlabFrozen(struct page *page)
+{
+ page->flags |= FROZEN;
+}
+
+static inline void ClearSlabFrozen(struct page *page)
+{
+ page->flags &= ~FROZEN;
+}
+
+static inline int SlabDebug(struct page *page)
+{
+ return page->flags & SLABDEBUG;
}
static inline void SetSlabDebug(struct page *page)
{
-#ifdef CONFIG_SLUB_DEBUG
- SetPageError(page);
-#endif
+ page->flags |= SLABDEBUG;
}
static inline void ClearSlabDebug(struct page *page)
{
-#ifdef CONFIG_SLUB_DEBUG
- ClearPageError(page);
-#endif
+ page->flags &= ~SLABDEBUG;
}
/*
@@ -719,6 +742,22 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
return search == NULL;
}
+static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
+{
+ if (s->flags & SLAB_TRACE) {
+ printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
+ s->name,
+ alloc ? "alloc" : "free",
+ object, page->inuse,
+ page->freelist);
+
+ if (!alloc)
+ print_section("Object", (void *)object, s->objsize);
+
+ dump_stack();
+ }
+}
+
/*
* Tracking of fully allocated slabs for debugging purposes.
*/
@@ -743,8 +782,18 @@ static void remove_full(struct kmem_cache *s, struct page *page)
spin_unlock(&n->list_lock);
}
-static int alloc_object_checks(struct kmem_cache *s, struct page *page,
- void *object)
+static void setup_object_debug(struct kmem_cache *s, struct page *page,
+ void *object)
+{
+ if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
+ return;
+
+ init_object(s, object, 0);
+ init_tracking(s, object);
+}
+
+static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
+ void *object, void *addr)
{
if (!check_slab(s, page))
goto bad;
@@ -759,13 +808,16 @@ static int alloc_object_checks(struct kmem_cache *s, struct page *page,
goto bad;
}
- if (!object)
- return 1;
-
- if (!check_object(s, page, object, 0))
+ if (object && !check_object(s, page, object, 0))
goto bad;
+ /* Success: perform special debug activities for allocs */
+ if (s->flags & SLAB_STORE_USER)
+ set_track(s, object, TRACK_ALLOC, addr);
+ trace(s, page, object, 1);
+ init_object(s, object, 1);
return 1;
+
bad:
if (PageSlab(page)) {
/*
@@ -783,8 +835,8 @@ bad:
return 0;
}
-static int free_object_checks(struct kmem_cache *s, struct page *page,
- void *object)
+static int free_debug_processing(struct kmem_cache *s, struct page *page,
+ void *object, void *addr)
{
if (!check_slab(s, page))
goto fail;
@@ -818,29 +870,22 @@ static int free_object_checks(struct kmem_cache *s, struct page *page,
"to slab %s", object, page->slab->name);
goto fail;
}
+
+ /* Special debug activities for freeing objects */
+ if (!SlabFrozen(page) && !page->freelist)
+ remove_full(s, page);
+ if (s->flags & SLAB_STORE_USER)
+ set_track(s, object, TRACK_FREE, addr);
+ trace(s, page, object, 0);
+ init_object(s, object, 0);
return 1;
+
fail:
printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
s->name, page, object);
return 0;
}
-static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
-{
- if (s->flags & SLAB_TRACE) {
- printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
- s->name,
- alloc ? "alloc" : "free",
- object, page->inuse,
- page->freelist);
-
- if (!alloc)
- print_section("Object", (void *)object, s->objsize);
-
- dump_stack();
- }
-}
-
static int __init setup_slub_debug(char *str)
{
if (!str || *str != '=')
@@ -891,13 +936,13 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
* On 32 bit platforms the limit is 256k. On 64bit platforms
* the limit is 512k.
*
- * Debugging or ctor/dtors may create a need to move the free
+ * Debugging or ctor may create a need to move the free
* pointer. Fail if this happens.
*/
- if (s->size >= 65535 * sizeof(void *)) {
+ if (s->objsize >= 65535 * sizeof(void *)) {
BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
- BUG_ON(s->ctor || s->dtor);
+ BUG_ON(s->ctor);
}
else
/*
@@ -909,26 +954,20 @@ static void kmem_cache_open_debug_check(struct kmem_cache *s)
s->flags |= slub_debug;
}
#else
+static inline void setup_object_debug(struct kmem_cache *s,
+ struct page *page, void *object) {}
-static inline int alloc_object_checks(struct kmem_cache *s,
- struct page *page, void *object) { return 0; }
+static inline int alloc_debug_processing(struct kmem_cache *s,
+ struct page *page, void *object, void *addr) { return 0; }
-static inline int free_object_checks(struct kmem_cache *s,
- struct page *page, void *object) { return 0; }
+static inline int free_debug_processing(struct kmem_cache *s,
+ struct page *page, void *object, void *addr) { return 0; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
-static inline void remove_full(struct kmem_cache *s, struct page *page) {}
-static inline void trace(struct kmem_cache *s, struct page *page,
- void *object, int alloc) {}
-static inline void init_object(struct kmem_cache *s,
- void *object, int active) {}
-static inline void init_tracking(struct kmem_cache *s, void *object) {}
static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
void *object, int active) { return 1; }
-static inline void set_track(struct kmem_cache *s, void *object,
- enum track_item alloc, void *addr) {}
+static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
#define slub_debug 0
#endif
@@ -965,13 +1004,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
static void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
- if (SlabDebug(page)) {
- init_object(s, object, 0);
- init_tracking(s, object);
- }
-
+ setup_object_debug(s, page, object);
if (unlikely(s->ctor))
- s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
+ s->ctor(object, s, 0);
}
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1030,15 +1065,12 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
{
int pages = 1 << s->order;
- if (unlikely(SlabDebug(page) || s->dtor)) {
+ if (unlikely(SlabDebug(page))) {
void *p;
slab_pad_check(s, page);
- for_each_object(p, s, page_address(page)) {
- if (s->dtor)
- s->dtor(p, s, 0);
+ for_each_object(p, s, page_address(page))
check_object(s, page, p, 0);
- }
}
mod_zone_page_state(page_zone(page),
@@ -1138,11 +1170,12 @@ static void remove_partial(struct kmem_cache *s,
*
* Must hold list_lock.
*/
-static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
{
if (slab_trylock(page)) {
list_del(&page->lru);
n->nr_partial--;
+ SetSlabFrozen(page);
return 1;
}
return 0;
@@ -1166,7 +1199,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
spin_lock(&n->list_lock);
list_for_each_entry(page, &n->partial, lru)
- if (lock_and_del_slab(n, page))
+ if (lock_and_freeze_slab(n, page))
goto out;
page = NULL;
out:
@@ -1245,10 +1278,11 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
*
* On exit the slab lock will have been dropped.
*/
-static void putback_slab(struct kmem_cache *s, struct page *page)
+static void unfreeze_slab(struct kmem_cache *s, struct page *page)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+ ClearSlabFrozen(page);
if (page->inuse) {
if (page->freelist)
@@ -1299,9 +1333,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
page->inuse--;
}
s->cpu_slab[cpu] = NULL;
- ClearPageActive(page);
-
- putback_slab(s, page);
+ unfreeze_slab(s, page);
}
static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
@@ -1392,9 +1424,7 @@ another_slab:
new_slab:
page = get_partial(s, gfpflags, node);
if (page) {
-have_slab:
s->cpu_slab[cpu] = page;
- SetPageActive(page);
goto load_freelist;
}
@@ -1424,17 +1454,15 @@ have_slab:
flush_slab(s, s->cpu_slab[cpu], cpu);
}
slab_lock(page);
- goto have_slab;
+ SetSlabFrozen(page);
+ s->cpu_slab[cpu] = page;
+ goto load_freelist;
}
return NULL;
debug:
object = page->freelist;
- if (!alloc_object_checks(s, page, object))
+ if (!alloc_debug_processing(s, page, object, addr))
goto another_slab;
- if (s->flags & SLAB_STORE_USER)
- set_track(s, object, TRACK_ALLOC, addr);
- trace(s, page, object, 1);
- init_object(s, object, 1);
page->inuse++;
page->freelist = object[page->offset];
@@ -1511,11 +1539,7 @@ checks_ok:
page->freelist = object;
page->inuse--;
- if (unlikely(PageActive(page)))
- /*
- * Cpu slabs are never on partial lists and are
- * never freed.
- */
+ if (unlikely(SlabFrozen(page)))
goto out_unlock;
if (unlikely(!page->inuse))
@@ -1545,14 +1569,8 @@ slab_empty:
return;
debug:
- if (!free_object_checks(s, page, x))
+ if (!free_debug_processing(s, page, x, addr))
goto out_unlock;
- if (!PageActive(page) && !page->freelist)
- remove_full(s, page);
- if (s->flags & SLAB_STORE_USER)
- set_track(s, x, TRACK_FREE, addr);
- trace(s, page, object, 0);
- init_object(s, object, 0);
goto checks_ok;
}
@@ -1789,7 +1807,7 @@ static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflag
page->freelist = get_freepointer(kmalloc_caches, n);
page->inuse++;
kmalloc_caches->node[node] = n;
- init_object(kmalloc_caches, n, 1);
+ setup_object_debug(kmalloc_caches, page, n);
init_kmem_cache_node(n);
atomic_long_inc(&n->nr_slabs);
add_partial(n, page);
@@ -1871,7 +1889,7 @@ static int calculate_sizes(struct kmem_cache *s)
* then we should never poison the object itself.
*/
if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
- !s->ctor && !s->dtor)
+ !s->ctor)
s->flags |= __OBJECT_POISON;
else
s->flags &= ~__OBJECT_POISON;
@@ -1899,9 +1917,8 @@ static int calculate_sizes(struct kmem_cache *s)
*/
s->inuse = size;
-#ifdef CONFIG_SLUB_DEBUG
if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
- s->ctor || s->dtor)) {
+ s->ctor)) {
/*
* Relocate free pointer after the object if it is not
* permitted to overwrite the first word of the object on
@@ -1914,6 +1931,7 @@ static int calculate_sizes(struct kmem_cache *s)
size += sizeof(void *);
}
+#ifdef CONFIG_SLUB_DEBUG
if (flags & SLAB_STORE_USER)
/*
* Need to store information about allocs and frees after
@@ -1970,13 +1988,11 @@ static int calculate_sizes(struct kmem_cache *s)
static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
const char *name, size_t size,
size_t align, unsigned long flags,
- void (*ctor)(void *, struct kmem_cache *, unsigned long),
- void (*dtor)(void *, struct kmem_cache *, unsigned long))
+ void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
memset(s, 0, kmem_size);
s->name = name;
s->ctor = ctor;
- s->dtor = dtor;
s->objsize = size;
s->flags = flags;
s->align = align;
@@ -2161,7 +2177,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
down_write(&slub_lock);
if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
- flags, NULL, NULL))
+ flags, NULL))
goto panic;
list_add(&s->list, &slab_caches);
@@ -2463,7 +2479,7 @@ static int slab_unmergeable(struct kmem_cache *s)
if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
return 1;
- if (s->ctor || s->dtor)
+ if (s->ctor)
return 1;
return 0;
@@ -2471,15 +2487,14 @@ static int slab_unmergeable(struct kmem_cache *s)
static struct kmem_cache *find_mergeable(size_t size,
size_t align, unsigned long flags,
- void (*ctor)(void *, struct kmem_cache *, unsigned long),
- void (*dtor)(void *, struct kmem_cache *, unsigned long))
+ void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
struct list_head *h;
if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
return NULL;
- if (ctor || dtor)
+ if (ctor)
return NULL;
size = ALIGN(size, sizeof(void *));
@@ -2521,8 +2536,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
{
struct kmem_cache *s;
+ BUG_ON(dtor);
down_write(&slub_lock);
- s = find_mergeable(size, align, flags, dtor, ctor);
+ s = find_mergeable(size, align, flags, ctor);
if (s) {
s->refcount++;
/*
@@ -2536,7 +2552,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
} else {
s = kmalloc(kmem_size, GFP_KERNEL);
if (s && kmem_cache_open(s, GFP_KERNEL, name,
- size, align, flags, ctor, dtor)) {
+ size, align, flags, ctor)) {
if (sysfs_slab_add(s)) {
kfree(s);
goto err;
@@ -3177,17 +3193,6 @@ static ssize_t ctor_show(struct kmem_cache *s, char *buf)
}
SLAB_ATTR_RO(ctor);
-static ssize_t dtor_show(struct kmem_cache *s, char *buf)
-{
- if (s->dtor) {
- int n = sprint_symbol(buf, (unsigned long)s->dtor);
-
- return n + sprintf(buf + n, "\n");
- }
- return 0;
-}
-SLAB_ATTR_RO(dtor);
-
static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
return sprintf(buf, "%d\n", s->refcount - 1);
@@ -3419,7 +3424,6 @@ static struct attribute * slab_attrs[] = {
&partial_attr.attr,
&cpu_slabs_attr.attr,
&ctor_attr.attr,
- &dtor_attr.attr,
&aliases_attr.attr,
&align_attr.attr,
&sanity_checks_attr.attr,
diff --git a/mm/sparse.c b/mm/sparse.c
index 6f3fff907bc2..1302f8348d51 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -44,7 +44,7 @@ EXPORT_SYMBOL(page_to_nid);
#endif
#ifdef CONFIG_SPARSEMEM_EXTREME
-static struct mem_section noinline *sparse_index_alloc(int nid)
+static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
struct mem_section *section = NULL;
unsigned long array_size = SECTIONS_PER_ROOT *
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index faa2a521dea3..d3a9c5368257 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -311,7 +311,7 @@ struct vm_struct *remove_vm_area(void *addr)
return v;
}
-void __vunmap(void *addr, int deallocate_pages)
+static void __vunmap(void *addr, int deallocate_pages)
{
struct vm_struct *area;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8faf27e5aa98..38254297a494 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
+#include <linux/sched.h>
#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};