path: root/include/linux/rhashtable.h
author    Daniel Borkmann <daniel@iogearbox.net>  2015-03-12 17:28:40 +0300
committer David S. Miller <davem@davemloft.net>   2015-03-13 06:02:30 +0300
commit    a5b6846f9e1a080493210013385c28faecee36f0 (patch)
tree      6b068157f7e2efb5d71978d3a63d14754bcd6a7c /include/linux/rhashtable.h
parent    9497df88ab5567daa001829051c5f87161a81ff0 (diff)
download  linux-a5b6846f9e1a080493210013385c28faecee36f0.tar.xz
rhashtable: kill ht->shift atomic operations
Commit c0c09bfdc415 ("rhashtable: avoid unnecessary wakeup for worker queue") changed ht->shift to be atomic, which is actually unnecessary.

Instead of leaving the current shift in the core rhashtable structure, it can be cached inside the individual bucket tables. There, it will only be initialized once during a new table allocation in the shrink/expansion slow path, and from then onward it stays immutable for the rest of the bucket table lifetime. That allows shift to be non-atomic. The patch also moves hash_rnd management into the table setup.

The rhashtable structure now consumes 3 instead of 4 cachelines.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Ying Xue <ying.xue@windriver.com>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
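To make the idea concrete, here is a minimal userspace C sketch (not the kernel implementation; demo_bucket_table and demo_bucket_table_alloc() are hypothetical names) of a per-table shift that is written exactly once when the table is allocated on the expand/shrink slow path and then read without atomic operations; resizing is expressed as allocating a new table rather than mutating a shared counter:

/*
 * Sketch only: shift and hash_rnd live in the per-generation bucket table,
 * are set once at allocation time, and stay immutable afterwards, so
 * readers need no atomics to look at them.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_bucket_table {
	size_t size;		/* number of buckets, always 1 << shift */
	uint32_t hash_rnd;	/* random seed, fixed for this table */
	uint32_t shift;		/* cached log2(size), set once at alloc */
	/* locks, buckets, ... omitted */
};

static struct demo_bucket_table *demo_bucket_table_alloc(uint32_t shift)
{
	struct demo_bucket_table *tbl = calloc(1, sizeof(*tbl));

	if (!tbl)
		return NULL;
	tbl->shift = shift;			/* immutable from here on */
	tbl->size = 1UL << shift;
	tbl->hash_rnd = (uint32_t)rand();	/* stand-in for a real RNG */
	return tbl;
}

int main(void)
{
	struct demo_bucket_table *old_tbl = demo_bucket_table_alloc(4);
	struct demo_bucket_table *new_tbl;

	if (!old_tbl)
		return 1;
	/* Growing means allocating a fresh table with shift + 1, not
	 * bumping an atomic counter shared through struct rhashtable. */
	new_tbl = demo_bucket_table_alloc(old_tbl->shift + 1);
	if (!new_tbl) {
		free(old_tbl);
		return 1;
	}
	printf("old: size=%zu shift=%u\n", old_tbl->size, (unsigned)old_tbl->shift);
	printf("new: size=%zu shift=%u\n", new_tbl->size, (unsigned)new_tbl->shift);
	free(old_tbl);
	free(new_tbl);
	return 0;
}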
Diffstat (limited to 'include/linux/rhashtable.h')
-rw-r--r--  include/linux/rhashtable.h | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 5ef8ea551556..c93ff8ac474a 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -50,6 +50,7 @@ struct rhash_head {
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
* @hash_rnd: Random seed to fold into hash
+ * @shift: Current size (1 << shift)
* @locks_mask: Mask to apply before accessing locks[]
* @locks: Array of spinlocks protecting individual buckets
* @buckets: size * hash buckets
@@ -57,6 +58,7 @@ struct rhash_head {
struct bucket_table {
size_t size;
u32 hash_rnd;
+ u32 shift;
unsigned int locks_mask;
spinlock_t *locks;
@@ -99,7 +101,6 @@ struct rhashtable_params {
* @tbl: Bucket table
* @future_tbl: Table under construction during expansion/shrinking
* @nelems: Number of elements in table
- * @shift: Current size (1 << shift)
* @p: Configuration parameters
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
@@ -110,12 +111,11 @@ struct rhashtable {
struct bucket_table __rcu *tbl;
struct bucket_table __rcu *future_tbl;
atomic_t nelems;
- atomic_t shift;
+ bool being_destroyed;
struct rhashtable_params p;
struct work_struct run_work;
struct mutex mutex;
struct list_head walkers;
- bool being_destroyed;
};
/**