author     Pekka Enberg <penberg@kernel.org>  2010-10-02 12:32:32 +0400
committer  Pekka Enberg <penberg@kernel.org>  2010-10-06 17:52:26 +0400
commit     15b7c5142049e7efc3071280e1370dc3b8add6f5 (patch)
tree       c294e0aa90b52088e63c8c67d0963a973d304690 /mm/slub.c
parent     5d1f57e4d3d547b113ebd62f569be13bf485e53b (diff)
download   linux-15b7c5142049e7efc3071280e1370dc3b8add6f5.tar.xz
SLUB: Optimize slab_free() debug check
This patch optimizes the slab_free() debug check to use "c->node != NUMA_NO_NODE" instead of "c->node >= 0", because the former generates smaller code on x86-64.

Before:

    4736: 48 39 70 08          cmp    %rsi,0x8(%rax)
    473a: 75 26                jne    4762 <kfree+0xa2>
    473c: 44 8b 48 10          mov    0x10(%rax),%r9d
    4740: 45 85 c9             test   %r9d,%r9d
    4743: 78 1d                js     4762 <kfree+0xa2>

After:

    4736: 48 39 70 08          cmp    %rsi,0x8(%rax)
    473a: 75 23                jne    475f <kfree+0x9f>
    473c: 83 78 10 ff          cmpl   $0xffffffffffffffff,0x10(%rax)
    4740: 74 1d                je     475f <kfree+0x9f>

This patch also cleans up __slab_alloc() to use NUMA_NO_NODE instead of "-1" when enabling debugging for a per-CPU cache.

Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
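For readers comparing the two predicates outside the kernel tree, here is a minimal userspace sketch; the struct and function names are hypothetical, and only NUMA_NO_NODE == -1 matches the kernel's definition in <linux/numa.h>. Building it with gcc -O2 -S reproduces the mov+test+js versus single-cmpl difference shown above:

    #define NUMA_NO_NODE (-1)       /* matches <linux/numa.h> */

    /* Hypothetical stand-in for the per-CPU cache structure. */
    struct cpu_slab_sketch {
            void *page;
            int node;               /* NUMA_NO_NODE marks debugging enabled */
    };

    /* Old form: signed-range test; gcc emits mov + test + js. */
    int node_check_old(const struct cpu_slab_sketch *c)
    {
            return c->node >= 0;
    }

    /* New form: equality against -1; gcc folds the load and compare
     * into one cmpl, since the immediate -1 fits a sign-extended
     * 8-bit operand (the "83 78 10 ff" encoding above). */
    int node_check_new(const struct cpu_slab_sketch *c)
    {
            return c->node != NUMA_NO_NODE;
    }

The two forms are interchangeable here only because c->node is either a valid node id (>= 0) or exactly -1, as the __slab_alloc() hunk below shows.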
Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 9f121c10184f..a018019aa91d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1718,7 +1718,7 @@ debug:
 	c->page->inuse++;
 	c->page->freelist = get_freepointer(s, object);
-	c->node = -1;
+	c->node = NUMA_NO_NODE;
 	goto unlock_out;
 }
@@ -1895,7 +1895,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	slab_free_hook_irq(s, x);
-	if (likely(page == c->page && c->node >= 0)) {
+	if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
 		set_freepointer(s, object, c->freelist);
 		c->freelist = object;
 		stat(s, FREE_FASTPATH);
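As context for the hunk above, a hedged sketch of the fastpath that runs when the check passes: the freed object is pushed onto the per-CPU freelist. The names are simplified stand-ins for mm/slub.c's kmem_cache_cpu and set_freepointer(), and the free pointer is assumed to sit at offset 0 of the object, which is the layout SLUB uses when debugging is off:

    /* Simplified stand-in for struct kmem_cache_cpu. */
    struct cpu_cache_sketch {
            void **freelist;        /* head of the per-CPU free-object list */
    };

    /* LIFO push, as in the fastpath above: store the current head
     * inside the freed object, then make the object the new head. */
    static void fastpath_free_sketch(struct cpu_cache_sketch *c, void *object)
    {
            *(void **)object = c->freelist; /* set_freepointer(s, object, c->freelist) */
            c->freelist = object;
    }

No locking is shown; in this era of SLUB the real fastpath runs with interrupts disabled around the whole sequence.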