summaryrefslogtreecommitdiff
path: root/lib/rhashtable.c
diff options
context:
space:
mode:
authorDaniel Borkmann <daniel@iogearbox.net>2015-02-20 02:53:37 +0300
committerDavid S. Miller <davem@davemloft.net>2015-02-21 01:38:09 +0300
commit342100d937ed6e5faf1e7ee7dcd7b3935fec8877 (patch)
tree4d3f668296689371fb6825344cfa02faf40cc5be /lib/rhashtable.c
parentee92259849b1616e4c23121f78eb1342d2b1ce1e (diff)
downloadlinux-342100d937ed6e5faf1e7ee7dcd7b3935fec8877.tar.xz
rhashtable: don't test for shrink on insert, expansion on delete
Restore pre 54c5b7d311c8 behaviour and only probe for expansions on inserts and shrinks on deletes. Currently, it will happen that on initial inserts into a sparse hash table, we may, e.g., shrink it first simply because it's not fully populated yet, only to later realize that we need to grow again. This however is counter intuitive, e.g. an initial default size of 64 elements is already small enough, and in case an element size hint is given to the hash table by a user, we should avoid unnecessary expansion steps, so a shrink is clearly unintended here. Fixes: 54c5b7d311c8 ("rhashtable: introduce rhashtable_wakeup_worker helper function") Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Cc: Ying Xue <ying.xue@windriver.com> Acked-by: Thomas Graf <tgraf@suug.ch> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--lib/rhashtable.c27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9cc4c4a90d00..38f7879df0d8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -537,16 +537,25 @@ unlock:
mutex_unlock(&ht->mutex);
}
-static void rhashtable_wakeup_worker(struct rhashtable *ht)
+static void rhashtable_probe_expand(struct rhashtable *ht)
{
- struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
- struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
- size_t size = tbl->size;
+ const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+ const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
/* Only adjust the table if no resizing is currently in progress. */
- if (tbl == new_tbl &&
- ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
- (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
+ if (tbl == new_tbl && ht->p.grow_decision &&
+ ht->p.grow_decision(ht, tbl->size))
+ schedule_work(&ht->run_work);
+}
+
+static void rhashtable_probe_shrink(struct rhashtable *ht)
+{
+ const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
+ const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+ /* Only adjust the table if no resizing is currently in progress. */
+ if (tbl == new_tbl && ht->p.shrink_decision &&
+ ht->p.shrink_decision(ht, tbl->size))
schedule_work(&ht->run_work);
}
@@ -569,7 +578,7 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
atomic_inc(&ht->nelems);
- rhashtable_wakeup_worker(ht);
+ rhashtable_probe_expand(ht);
}
/**
@@ -682,7 +691,7 @@ found:
if (ret) {
atomic_dec(&ht->nelems);
- rhashtable_wakeup_worker(ht);
+ rhashtable_probe_shrink(ht);
}
rcu_read_unlock();