author	NeilBrown <neilb@suse.com>	2019-04-12 04:52:08 +0300
committer	David S. Miller <davem@davemloft.net>	2019-04-13 03:34:45 +0300
commit	adc6a3ab192eb40fb9d8b093c87d9aa785af4513 (patch)
tree	6aae93c08ed6295e47300fbe11dc010ee1b32a1d /lib/rhashtable.c
parent	c5783311a1248c437614d438b69c5f31fe483ecb (diff)
download	linux-adc6a3ab192eb40fb9d8b093c87d9aa785af4513.tar.xz
rhashtable: move dereference inside rht_ptr()
Rather than dereferencing a pointer to a bucket and then passing the
result to rht_ptr(), we now pass in the pointer and do the dereference
in rht_ptr().

This requires that we pass in the tbl and hash as well to support RCU
checks, and means that the various rht_for_each functions can expect a
pointer that can be dereferenced without further care.

There are two places where we dereference a bucket pointer where there
is no testable protection - in each case we know that we must have
exclusive access without having taken a lock.  The previous code used
rht_dereference() to pretend that holding the mutex provided
protection, but holding the mutex never provides protection for
accessing buckets.

So instead introduce rht_ptr_exclusive() that can be used when there is
known to be exclusive access without holding any locks.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
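For reference, a minimal sketch of what the reworked helpers look like
on the include/linux/rhashtable.h side, which is not shown in this
lib/rhashtable.c diff (simplified; exact types and RCU annotations
follow the header at the time):

	/* A bucket head may carry a lock in its low bit, so __rht_ptr()
	 * masks that bit off before the value is used as a
	 * struct rhash_head *.
	 */
	static inline struct rhash_head *__rht_ptr(struct rhash_lock_head *p)
	{
		return (struct rhash_head *)((unsigned long)p & ~BIT(0));
	}

	/* rht_ptr() now takes a pointer to the bucket slot and does the
	 * RCU-checked dereference itself, using tbl and hash for the check.
	 */
	static inline struct rhash_head *rht_ptr(
		struct rhash_lock_head __rcu *const *bkt,
		struct bucket_table *tbl,
		unsigned int hash)
	{
		return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash));
	}

	/* rht_ptr_exclusive() is for callers known to have exclusive
	 * access without holding any lock, where no protection can be
	 * asserted; the "1" tells lockdep to accept the dereference.
	 */
	static inline struct rhash_head *rht_ptr_exclusive(
		struct rhash_lock_head __rcu *const *bkt)
	{
		return __rht_ptr(rcu_dereference_protected(*bkt, 1));
	}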
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--	lib/rhashtable.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index e387ceb00e86..237368ea98c5 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -231,7 +231,8 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 
 	err = -ENOENT;
 
-	rht_for_each_from(entry, rht_ptr(*bkt), old_tbl, old_hash) {
+	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
+			  old_tbl, old_hash) {
 		err = 0;
 		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
@@ -248,8 +249,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
 			SINGLE_DEPTH_NESTING);
 
-	head = rht_ptr(rht_dereference_bucket(new_tbl->buckets[new_hash],
-					      new_tbl, new_hash));
+	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
 
 	RCU_INIT_POINTER(entry->next, head);
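The call-site pattern in this hunk shows the core of the change:
rht_ptr() now takes the address of the bucket slot rather than a value
the caller already loaded, so the checked load and the lock-bit
masking happen in one place.  A minimal before/after sketch, using the
names from the hunk above:

	/* Before: the caller loads the bucket, the helper only strips
	 * the lock bit. */
	head = rht_ptr(rht_dereference_bucket(new_tbl->buckets[new_hash],
					      new_tbl, new_hash));

	/* After: the caller passes the slot's address; the helper does
	 * the RCU-checked load and strips the lock bit. */
	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);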
@@ -491,7 +491,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 	int elasticity;
 
 	elasticity = RHT_ELASTICITY;
-	rht_for_each_from(head, rht_ptr(*bkt), tbl, hash) {
+	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
 		struct rhlist_head *list;
 		struct rhlist_head *plist;
@@ -557,7 +557,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	if (unlikely(rht_grow_above_100(ht, tbl)))
 		return ERR_PTR(-EAGAIN);
 
-	head = rht_ptr(rht_dereference_bucket(*bkt, tbl, hash));
+	head = rht_ptr(bkt, tbl, hash);
 
 	RCU_INIT_POINTER(obj->next, head);
 	if (ht->rhlist) {
@@ -1139,7 +1139,7 @@ restart:
 		struct rhash_head *pos, *next;
 
 		cond_resched();
-		for (pos = rht_ptr(rht_dereference(*rht_bucket(tbl, i), ht)),
+		for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
 		     next = !rht_is_a_nulls(pos) ?
 			rht_dereference(pos->next, ht) : NULL;
 		     !rht_is_a_nulls(pos);
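This last hunk is in the bucket walk done during table teardown
(rhashtable_free_and_destroy()), where exclusive access is guaranteed
by construction rather than by any lock that could be asserted - the
case rht_ptr_exclusive() exists for.  A minimal sketch of the pattern,
where free_fn and arg stand in for the caller-supplied callback and the
real loop also handles rhlists and nested tables:

	/* Sketch: walking one bucket at teardown.  No lock is held;
	 * nothing else can reach the table any more, so the plain
	 * rht_ptr_exclusive() dereference is safe. */
	struct rhash_head *pos, *next;

	for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
	     next = !rht_is_a_nulls(pos) ?
			rht_dereference(pos->next, ht) : NULL;
	     !rht_is_a_nulls(pos);
	     pos = next,
	     next = !rht_is_a_nulls(pos) ?
			rht_dereference(pos->next, ht) : NULL)
		free_fn(rht_obj(ht, pos), arg);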