summaryrefslogtreecommitdiff
path: root/net/rds/ib_recv.c
diff options
context:
space:
mode:
author Andy Grover <andy.grover@oracle.com> 2010-07-08 03:46:26 +0400
committer Andy Grover <andy.grover@oracle.com> 2010-09-09 05:16:38 +0400
commit c20f5b9633bb0953bd2422f0f1430a2028cdbd0a (patch)
tree dc628e1168c963940195ac5d2b5dbe3f54682240 /net/rds/ib_recv.c
parent d455ab64096b9a86849c7315c53e595330842db6 (diff)
download linux-c20f5b9633bb0953bd2422f0f1430a2028cdbd0a.tar.xz
RDS/IB: Use SLAB_HWCACHE_ALIGN flag for kmem_cache_create()
We are *definitely* counting cycles as closely as DaveM, so ensure hwcache alignment for our recv ring control structs.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
Diffstat (limited to 'net/rds/ib_recv.c')
-rw-r--r-- net/rds/ib_recv.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 9c4208f6b451..37dab2898ad0 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -1051,13 +1051,13 @@ int __init rds_ib_recv_init(void)
rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
sizeof(struct rds_ib_incoming),
- 0, 0, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!rds_ib_incoming_slab)
goto out;
rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
sizeof(struct rds_page_frag),
- 0, 0, NULL);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!rds_ib_frag_slab)
kmem_cache_destroy(rds_ib_incoming_slab);
else