author	Christoph Lameter <cl@linux.com>	2015-11-21 02:57:38 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-21 03:17:32 +0300
commit	87098373e244840e00bd1c93884c1d917411597e (patch)
tree	3edf3bc77482a1424c11732060bc86fc589df242
parent	a380a3c75529a5c42b78c0d64a46404f8cb0c0d1 (diff)
download	linux-87098373e244840e00bd1c93884c1d917411597e.tar.xz
slub: avoid irqoff/on in bulk allocation
Use the new function that can do allocation while interrupts are disabled.
Avoids irq on/off sequences.

Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
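The key point: ___slab_alloc(), the new slow-path variant referenced above, can be called with interrupts already disabled, so the bulk loop no longer re-enables and re-disables IRQs around every slow-path allocation; failures converge on one error label instead. A simplified before/after sketch of that control flow, derived from the hunks below (illustrative only, not the literal mm/slub.c source):

	/* Before: every slow-path hit toggled interrupts around __slab_alloc(). */
	if (unlikely(!object)) {
		local_irq_enable();
		p[i] = __slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_, c);
		if (unlikely(!p[i])) {
			__kmem_cache_free_bulk(s, i, p); /* free the partial batch */
			return false;
		}
		local_irq_disable();
		c = this_cpu_ptr(s->cpu_slab);
	}

	/*
	 * After: ___slab_alloc() runs with IRQs still disabled; failures jump
	 * to a shared error label that frees the partial batch and re-enables
	 * interrupts exactly once.
	 */
	if (unlikely(!object)) {
		p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, _RET_IP_, c);
		if (unlikely(!p[i]))
			goto error;
		c = this_cpu_ptr(s->cpu_slab);
	}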
-rw-r--r--	mm/slub.c	24
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 2a952751bb50..23f9d8d26422 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2818,30 +2818,23 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 		void *object = c->freelist;
 
 		if (unlikely(!object)) {
-			local_irq_enable();
 			/*
 			 * Invoking slow path likely have side-effect
 			 * of re-populating per CPU c->freelist
 			 */
-			p[i] = __slab_alloc(s, flags, NUMA_NO_NODE,
+			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
 					    _RET_IP_, c);
-			if (unlikely(!p[i])) {
-				__kmem_cache_free_bulk(s, i, p);
-				return false;
-			}
-			local_irq_disable();
+			if (unlikely(!p[i]))
+				goto error;
+
 			c = this_cpu_ptr(s->cpu_slab);
 			continue; /* goto for-loop */
 		}
 
 		/* kmem_cache debug support */
 		s = slab_pre_alloc_hook(s, flags);
-		if (unlikely(!s)) {
-			__kmem_cache_free_bulk(s, i, p);
-			c->tid = next_tid(c->tid);
-			local_irq_enable();
-			return false;
-		}
+		if (unlikely(!s))
+			goto error;
 
 		c->freelist = get_freepointer(s, object);
 		p[i] = object;
@@ -2861,6 +2854,11 @@ bool kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	}
 	return true;
+
+error:
+	__kmem_cache_free_bulk(s, i, p);
+	local_irq_enable();
+	return false;
 }
 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
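For context on the API whose fast path this patch touches, a minimal caller-side sketch of the bulk interface as it exists at this point (bool-returning kmem_cache_alloc_bulk plus kmem_cache_free_bulk). This is hypothetical illustration code, not part of the patch; the object struct, cache pointer and batch size are made up:

	#include <linux/slab.h>
	#include <linux/errno.h>

	#define FOO_BATCH 16

	/*
	 * Allocate a batch of objects from a cache in one call, then free them
	 * the same way. kmem_cache_alloc_bulk() returns true only if all
	 * FOO_BATCH objects were allocated; on failure it has already freed any
	 * partially allocated objects (the goto error path above), so the
	 * caller only has to report the error.
	 */
	static int foo_batch_demo(struct kmem_cache *foo_cache)
	{
		void *objs[FOO_BATCH];

		if (!kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, FOO_BATCH, objs))
			return -ENOMEM;

		/* ... use objs[0..FOO_BATCH-1] ... */

		kmem_cache_free_bulk(foo_cache, FOO_BATCH, objs);
		return 0;
	}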