path: root/mm/slab.c
author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2014-08-07 03:04:31 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-07 05:01:14 +0400
commit		49dfc304ba241b315068023962004542c5118103 (patch)
tree		cd8563b6b7fc120bf43cfbbe3ab63a5f859fb583 /mm/slab.c
parent		c8522a3a5832b843570a3315674f5a3575958a51 (diff)
download	linux-49dfc304ba241b315068023962004542c5118103.tar.xz
slab: use the lock on alien_cache, instead of the lock on array_cache
Now that we have a separate alien_cache structure, it is better to hold the lock on alien_cache while manipulating it. After that, we don't need the lock on array_cache, so remove it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
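For orientation, here is a minimal sketch of the two structures as they stand after this patch. The array_cache fields are taken from the diff context below; the alien_cache layout is assumed from the parent commit (c8522a3a5832), which wraps an array_cache together with the lock that protects it:

	/* Illustrative sketch, not a verbatim copy of the kernel headers. */
	struct array_cache {
		unsigned int avail;
		unsigned int limit;
		unsigned int batchcount;
		unsigned int touched;
				/* no spinlock_t here any more */
		void *entry[];	/* objects cached for later reuse */
	};

	struct alien_cache {
		spinlock_t lock;	/* serializes access to ac below */
		struct array_cache ac;	/* remote-node objects pending free */
	};

With this split, the plain array_cache (also used lock-free per CPU) carries no lock at all, and every path that touches an alien cache takes alc->lock, as the hunks below show.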
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	| 25
1 file changed, 8 insertions(+), 17 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index de91d6f3a2a4..e4ce73c32a7a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -191,7 +191,6 @@ struct array_cache {
unsigned int limit;
unsigned int batchcount;
unsigned int touched;
- spinlock_t lock;
void *entry[]; /*
* Must have this definition in here for the proper
* alignment of array_cache. Also simplifies accessing
@@ -512,7 +511,7 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
return;
for_each_node(r) {
if (alc[r])
- lockdep_set_class(&(alc[r]->ac.lock), alc_key);
+ lockdep_set_class(&(alc[r]->lock), alc_key);
}
}
@@ -811,7 +810,6 @@ static void init_arraycache(struct array_cache *ac, int limit, int batch)
ac->limit = limit;
ac->batchcount = batch;
ac->touched = 0;
- spin_lock_init(&ac->lock);
}
}
@@ -1010,6 +1008,7 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
alc = kmalloc_node(memsize, gfp, node);
init_arraycache(&alc->ac, entries, batch);
+ spin_lock_init(&alc->lock);
return alc;
}
@@ -1086,9 +1085,9 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
if (alc) {
ac = &alc->ac;
- if (ac->avail && spin_trylock_irq(&ac->lock)) {
+ if (ac->avail && spin_trylock_irq(&alc->lock)) {
__drain_alien_cache(cachep, ac, node);
- spin_unlock_irq(&ac->lock);
+ spin_unlock_irq(&alc->lock);
}
}
}
@@ -1106,9 +1105,9 @@ static void drain_alien_cache(struct kmem_cache *cachep,
alc = alien[i];
if (alc) {
ac = &alc->ac;
- spin_lock_irqsave(&ac->lock, flags);
+ spin_lock_irqsave(&alc->lock, flags);
__drain_alien_cache(cachep, ac, i);
- spin_unlock_irqrestore(&ac->lock, flags);
+ spin_unlock_irqrestore(&alc->lock, flags);
}
}
}
@@ -1136,13 +1135,13 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
if (n->alien && n->alien[nodeid]) {
alien = n->alien[nodeid];
ac = &alien->ac;
- spin_lock(&ac->lock);
+ spin_lock(&alien->lock);
if (unlikely(ac->avail == ac->limit)) {
STATS_INC_ACOVERFLOW(cachep);
__drain_alien_cache(cachep, ac, nodeid);
}
ac_put_obj(cachep, ac, objp);
- spin_unlock(&ac->lock);
+ spin_unlock(&alien->lock);
} else {
n = get_node(cachep, nodeid);
spin_lock(&n->list_lock);
@@ -1613,10 +1612,6 @@ void __init kmem_cache_init(void)
memcpy(ptr, cpu_cache_get(kmem_cache),
sizeof(struct arraycache_init));
- /*
- * Do not assume that spinlocks can be initialized via memcpy:
- */
- spin_lock_init(&ptr->lock);
kmem_cache->array[smp_processor_id()] = ptr;
@@ -1626,10 +1621,6 @@ void __init kmem_cache_init(void)
!= &initarray_generic.cache);
memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
sizeof(struct arraycache_init));
- /*
- * Do not assume that spinlocks can be initialized via memcpy:
- */
- spin_lock_init(&ptr->lock);
kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
}