author     Oleg Nesterov <oleg@redhat.com>                2015-08-21 20:43:03 +0300
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-10-06 21:25:40 +0300
commit     cc5f730b41506d37a5c2826b2e801d0a59853d11
tree       9fdb987ba3a660abc7c71752ec0c5a953268117f /kernel/locking/percpu-rwsem.c
parent     f324a76324c97e81a6ba66a8efac20cdbffd759e
download   linux-cc5f730b41506d37a5c2826b2e801d0a59853d11.tar.xz
locking/percpu-rwsem: Clean up the lockdep annotations in percpu_down_read()
Based on Peter Zijlstra's earlier patch.

Change percpu_down_read() to use __down_read(); this way we can do
rwsem_acquire_read() unconditionally at the start, which makes the code
more symmetric and clean.

Originally-From: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
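For context, this is the pairing the patch relies on. The sketch below is
illustrative only: down_read(), __down_read(), up_read() and __up_read() are
the real rwsem entry points referenced by the patch, but the body shown is a
simplification, not a copy of kernel/locking/rwsem.c.

/*
 * Sketch of the split the patch exploits: down_read() is the
 * lockdep-annotated wrapper, __down_read() the raw acquisition.
 */
void down_read(struct rw_semaphore *sem)
{
        might_sleep();
        /* lockdep records the read acquisition on sem->dep_map ... */
        rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
        /*
         * ... and only then is the lock actually taken; __down_read()
         * does just this step, with no lockdep bookkeeping.
         */
        __down_read(sem);
}

Because percpu_down_read() now annotates brw->rw_sem.dep_map itself,
unconditionally and up front, taking the slow path with plain
down_read()/up_read() would acquire and release that same dep_map a second
time. The raw __down_read()/__up_read() calls avoid the double annotation,
and the acquisition recorded at the top stays in force until the reader
finally drops the percpu semaphore.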
Diffstat (limited to 'kernel/locking/percpu-rwsem.c')
-rw-r--r--  kernel/locking/percpu-rwsem.c | 10
1 file changed, 5 insertions, 5 deletions
diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c
index 02a726dd9adc..f231e0bb311c 100644
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -70,14 +70,14 @@ static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
 void percpu_down_read(struct percpu_rw_semaphore *brw)
 {
        might_sleep();
-       if (likely(update_fast_ctr(brw, +1))) {
-               rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+       rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);
+
+       if (likely(update_fast_ctr(brw, +1)))
                return;
-       }

-       down_read(&brw->rw_sem);
+       /* Avoid rwsem_acquire_read() and rwsem_release() */
+       __down_read(&brw->rw_sem);
        atomic_inc(&brw->slow_read_ctr);
-       /* avoid up_read()->rwsem_release() */
        __up_read(&brw->rw_sem);
 }
 EXPORT_SYMBOL_GPL(percpu_down_read);
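Pieced together from the hunk above, percpu_down_read() reads as follows
after this patch (a reconstruction for reference, assembled from the diff
rather than copied from the tree):

void percpu_down_read(struct percpu_rw_semaphore *brw)
{
        might_sleep();
        rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);

        /* Fast path: per-cpu reader count bumped, nothing else to do. */
        if (likely(update_fast_ctr(brw, +1)))
                return;

        /* Avoid rwsem_acquire_read() and rwsem_release() */
        __down_read(&brw->rw_sem);
        atomic_inc(&brw->slow_read_ctr);
        __up_read(&brw->rw_sem);
}
EXPORT_SYMBOL_GPL(percpu_down_read);

The lockdep acquisition now happens exactly once, on both the fast and the
slow path, and the underlying rwsem is held only long enough to bump
slow_read_ctr; the matching rwsem_release() is presumably left to
percpu_up_read().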