author     NeilBrown <neilb@suse.com>               2019-04-12 04:52:08 +0300
committer  David S. Miller <davem@davemloft.net>    2019-04-13 03:34:45 +0300
commit     f4712b46a529ca2da078c82d5d99d367c7ebf82b (patch)
tree       0e77f82fcf92c8483e192ecba4acb6ce8c5a86a8 /lib
parent     adc6a3ab192eb40fb9d8b093c87d9aa785af4513 (diff)
download   linux-f4712b46a529ca2da078c82d5d99d367c7ebf82b.tar.xz
rhashtable: replace rht_ptr_locked() with rht_assign_locked()
The only times rht_ptr_locked() is used, it is to store a new value in a
bucket-head. This is the only time it makes sense to use it, too. So replace
it with a function that does the whole task: it sets the lock bit and assigns
to a bucket head.

Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
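For context, the new helper folds the two steps that were previously open-coded at each call site (re-setting the lock bit and publishing the new head) into one call. A minimal sketch of the idea, assuming the bucket lock lives in bit 0 of the head pointer; the in-tree definition in include/linux/rhashtable.h may differ in types and detail:

	/*
	 * Sketch only, not the exact in-tree code: publish @obj as the new
	 * head of the bucket at @bkt while keeping the bucket's lock bit
	 * (assumed here to be bit 0 of the stored pointer) set.
	 */
	static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
					     struct rhash_head *obj)
	{
		/* OR the lock bit back in and assign with RCU publication semantics. */
		rcu_assign_pointer(*bkt, (void *)((unsigned long)obj | BIT(0)));
	}

With this helper, callers no longer need to know how the lock bit is encoded; they simply hand the new head to rht_assign_locked(), as the hunks below show.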
Diffstat (limited to 'lib')
-rw-r--r--  lib/rhashtable.c  |  6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 237368ea98c5..ef5378efdef3 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -259,7 +259,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
rcu_assign_pointer(*pprev, next);
else
/* Need to preserved the bit lock. */
- rcu_assign_pointer(*bkt, rht_ptr_locked(next));
+ rht_assign_locked(bkt, next);
out:
return err;
@@ -517,7 +517,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
rcu_assign_pointer(*pprev, obj);
else
/* Need to preserve the bit lock */
- rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+ rht_assign_locked(bkt, obj);
return NULL;
}
@@ -570,7 +570,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
/* bkt is always the head of the list, so it holds
* the lock, which we need to preserve
*/
- rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+ rht_assign_locked(bkt, obj);
atomic_inc(&ht->nelems);
if (rht_grow_above_75(ht, tbl))