author     Ingo Molnar <mingo@kernel.org>   2018-01-03 16:14:18 +0300
committer  Ingo Molnar <mingo@kernel.org>   2018-01-03 16:14:18 +0300
commit     475c5ee193fd682c6383b5e418e65e46a477d176 (patch)
tree       43f70a77d919e10237edbbbc3ca7ac8076d27410 /lib
parent     30a7acd573899fd8b8ac39236eff6468b195ac7d (diff)
parent     1dfa55e01987288d847220b8c027204871440ed1 (diff)
download   linux-475c5ee193fd682c6383b5e418e65e46a477d176.tar.xz
Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU updates from Paul E. McKenney:

 - Updates to use cond_resched() instead of cond_resched_rcu_qs() where
   feasible (currently everywhere except in kernel/rcu and in
   kernel/torture.c). Also a couple of fixes to avoid sending IPIs to
   offline CPUs.

 - Updates to simplify RCU's dyntick-idle handling.

 - Updates to remove almost all uses of smp_read_barrier_depends() and
   read_barrier_depends().

 - Miscellaneous fixes.

 - Torture-test updates.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
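A note on the pattern being removed below: the smp_read_barrier_depends() calls sat between loading a pointer and dereferencing it, and READ_ONCE() now supplies that address-dependency ordering itself (on Alpha it implies the dependency barrier), so the separate read-side barrier can go. As a rough illustration only, here is a minimal userspace analogue of the publish/consume shape using C11 atomics; the struct and the publish()/reader() names are invented for this sketch and are not part of the patch:

#include <stdatomic.h>
#include <stdio.h>

struct node { int payload; };

static struct node real_node = { .payload = 42 };
static _Atomic(struct node *) shared;

/* Publisher: initialise the node, then publish the pointer with release
 * semantics, the userspace counterpart of rcu_assign_pointer(). */
static void publish(void)
{
	atomic_store_explicit(&shared, &real_node, memory_order_release);
}

/* Reader: a single dependency-ordered load stands in for what used to be
 * a plain load followed by smp_read_barrier_depends(). */
static int reader(void)
{
	struct node *p = atomic_load_explicit(&shared, memory_order_consume);

	return p ? p->payload : -1;
}

int main(void)
{
	publish();
	printf("payload = %d\n", reader());
	return 0;
}

The same reasoning applies to every READ_ONCE() annotated "Address dependency." in the hunks below: the dependent load orders the later dereference without any extra barrier.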
Diffstat (limited to 'lib')
-rw-r--r--   lib/assoc_array.c       37
-rw-r--r--   lib/percpu-refcount.c    8
2 files changed, 16 insertions(+), 29 deletions(-)
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index b77d51da8c73..c6659cb37033 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -38,12 +38,10 @@ begin_node:
if (assoc_array_ptr_is_shortcut(cursor)) {
/* Descend through a shortcut */
shortcut = assoc_array_ptr_to_shortcut(cursor);
- smp_read_barrier_depends();
- cursor = READ_ONCE(shortcut->next_node);
+ cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
}
node = assoc_array_ptr_to_node(cursor);
- smp_read_barrier_depends();
slot = 0;
/* We perform two passes of each node.
@@ -55,15 +53,12 @@ begin_node:
*/
has_meta = 0;
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
has_meta |= (unsigned long)ptr;
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
- /* We need a barrier between the read of the pointer
- * and dereferencing the pointer - but only if we are
- * actually going to dereference it.
+ /* We need a barrier between the read of the pointer,
+ * which is supplied by the above READ_ONCE().
*/
- smp_read_barrier_depends();
-
/* Invoke the callback */
ret = iterator(assoc_array_ptr_to_leaf(ptr),
iterator_data);
@@ -86,10 +81,8 @@ begin_node:
continue_node:
node = assoc_array_ptr_to_node(cursor);
- smp_read_barrier_depends();
-
for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
if (assoc_array_ptr_is_meta(ptr)) {
cursor = ptr;
goto begin_node;
@@ -98,16 +91,15 @@ continue_node:
finished_node:
/* Move up to the parent (may need to skip back over a shortcut) */
- parent = READ_ONCE(node->back_pointer);
+ parent = READ_ONCE(node->back_pointer); /* Address dependency. */
slot = node->parent_slot;
if (parent == stop)
return 0;
if (assoc_array_ptr_is_shortcut(parent)) {
shortcut = assoc_array_ptr_to_shortcut(parent);
- smp_read_barrier_depends();
cursor = parent;
- parent = READ_ONCE(shortcut->back_pointer);
+ parent = READ_ONCE(shortcut->back_pointer); /* Address dependency. */
slot = shortcut->parent_slot;
if (parent == stop)
return 0;
@@ -147,7 +139,7 @@ int assoc_array_iterate(const struct assoc_array *array,
void *iterator_data),
void *iterator_data)
{
- struct assoc_array_ptr *root = READ_ONCE(array->root);
+ struct assoc_array_ptr *root = READ_ONCE(array->root); /* Address dependency. */
if (!root)
return 0;
@@ -194,7 +186,7 @@ assoc_array_walk(const struct assoc_array *array,
pr_devel("-->%s()\n", __func__);
- cursor = READ_ONCE(array->root);
+ cursor = READ_ONCE(array->root); /* Address dependency. */
if (!cursor)
return assoc_array_walk_tree_empty;
@@ -216,11 +208,9 @@ jumped:
consider_node:
node = assoc_array_ptr_to_node(cursor);
- smp_read_barrier_depends();
-
slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
slot &= ASSOC_ARRAY_FAN_MASK;
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
pr_devel("consider slot %x [ix=%d type=%lu]\n",
slot, level, (unsigned long)ptr & 3);
@@ -254,7 +244,6 @@ consider_node:
cursor = ptr;
follow_shortcut:
shortcut = assoc_array_ptr_to_shortcut(cursor);
- smp_read_barrier_depends();
pr_devel("shortcut to %d\n", shortcut->skip_to_level);
sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
BUG_ON(sc_level > shortcut->skip_to_level);
@@ -294,7 +283,7 @@ follow_shortcut:
} while (sc_level < shortcut->skip_to_level);
/* The shortcut matches the leaf's index to this point. */
- cursor = READ_ONCE(shortcut->next_node);
+ cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
level = sc_level;
goto jumped;
@@ -331,20 +320,18 @@ void *assoc_array_find(const struct assoc_array *array,
return NULL;
node = result.terminal_node.node;
- smp_read_barrier_depends();
/* If the target key is available to us, it has to be pointed to by
* the terminal node.
*/
for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
- ptr = READ_ONCE(node->slots[slot]);
+ ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
if (ptr && assoc_array_ptr_is_leaf(ptr)) {
/* We need a barrier between the read of the pointer
* and dereferencing the pointer - but only if we are
* actually going to dereference it.
*/
leaf = assoc_array_ptr_to_leaf(ptr);
- smp_read_barrier_depends();
if (ops->compare_object(leaf, index_key))
return (void *)leaf;
}
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe03c6d52761..30e7dd88148b 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -197,10 +197,10 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
/*
- * Restore per-cpu operation. smp_store_release() is paired with
- * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
- * that the zeroing is visible to all percpu accesses which can see
- * the following __PERCPU_REF_ATOMIC clearing.
+ * Restore per-cpu operation. smp_store_release() is paired
+ * with READ_ONCE() in __ref_is_percpu() and guarantees that the
+ * zeroing is visible to all percpu accesses which can see the
+ * following __PERCPU_REF_ATOMIC clearing.
*/
for_each_possible_cpu(cpu)
*per_cpu_ptr(percpu_count, cpu) = 0;
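For context, the pairing the updated comment describes is a release store on the writer side (clearing __PERCPU_REF_ATOMIC after zeroing the per-cpu counts) against a dependent read in __ref_is_percpu(). Below is a rough userspace sketch of that shape with C11 atomics, not the kernel code; the counters array, the REF_ATOMIC bit and the function names are made up for illustration, and an acquire load stands in for the kernel's dependency-ordered READ_ONCE():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REF_ATOMIC 1UL			/* stand-in for __PERCPU_REF_ATOMIC */

static unsigned long counters[4];	/* stand-in for the per-cpu counts */
static _Atomic uintptr_t mode_and_ptr;	/* low bit doubles as the mode flag */

/* Writer: zero the counters, then clear the flag with a release store, so a
 * reader that observes the cleared flag also observes the zeroed counters. */
static void switch_to_percpu(void)
{
	for (int cpu = 0; cpu < 4; cpu++)
		counters[cpu] = 0;

	atomic_store_explicit(&mode_and_ptr, (uintptr_t)counters,
			      memory_order_release);
}

/* Reader: one ordered load replaces the old load + read_barrier_depends(). */
static bool ref_is_percpu(unsigned long **countp)
{
	uintptr_t v = atomic_load_explicit(&mode_and_ptr, memory_order_acquire);

	if (v & REF_ATOMIC)
		return false;
	*countp = (unsigned long *)v;
	return true;
}

int main(void)
{
	atomic_store(&mode_and_ptr, (uintptr_t)counters | REF_ATOMIC);
	switch_to_percpu();

	unsigned long *count;
	if (ref_is_percpu(&count))
		printf("per-cpu mode, cpu0 count = %lu\n", count[0]);
	return 0;
}

The comment change itself only records that the read side no longer needs an explicit smp_read_barrier_depends(): the READ_ONCE() in __ref_is_percpu() is now the other half of the smp_store_release() pairing.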