author		Paul E. McKenney <paulmck@kernel.org>	2020-03-18 02:02:06 +0300
committer	Paul E. McKenney <paulmck@kernel.org>	2020-04-27 21:03:52 +0300
commit		276c410448dbca357a2bc3539acfe04862e5f172 (patch)
tree		5f25b125fb10b79997286884b3724f5415eb5b11 /include/linux/rcupdate_trace.h
parent		b0afa0f056676ffe0a7213818f09d2460adbcc16 (diff)
download	linux-276c410448dbca357a2bc3539acfe04862e5f172.tar.xz
rcu-tasks: Split ->trc_reader_need_end
This commit splits ->trc_reader_need_end by using the rcu_special union.
This change permits readers to check to see if a memory barrier is required
without any added overhead in the common case where no such barrier is
needed.  This commit also adds the read-side checking.  Later commits will
add the machinery to properly set the new ->trc_reader_special.b.need_mb
field.

This commit also makes rcu_read_unlock_trace_special() tolerate nested
read-side critical sections within interrupt and NMI handlers.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
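[Illustration, not part of the commit.]  The trick being relied on is that the
union behind the new ->trc_reader_special overlays several small per-purpose
flag fields (the ".b" view, including the new need_mb byte) with a single word
(the ".s" view), so the unlock fast path can ask "is anything special pending?"
with one load of the aggregate field, and only the slow path examines the
individual flags.  The userspace sketch below shows that pattern only; the
union name, field names, and widths are illustrative stand-ins, not the
kernel's actual definitions.

/* Illustrative sketch of the "per-byte flags overlaid with one word" pattern. */
#include <stdint.h>
#include <stdio.h>

union reader_special {
	struct {
		uint8_t need_qs;	/* hypothetical: updater wants a QS report */
		uint8_t need_mb;	/* hypothetical: reader must run a barrier */
	} b;				/* individual flags, written one at a time */
	uint16_t s;			/* all flags, readable with a single load */
};

static void reader_unlock(union reader_special *sp)
{
	if (!sp->s) {			/* one load covers every flag */
		puts("fast path: nothing special pending");
		return;
	}
	if (sp->b.need_mb)		/* slow path: act on individual flags */
		puts("slow path: issue memory barrier");
	if (sp->b.need_qs)
		puts("slow path: report quiescent state");
}

int main(void)
{
	union reader_special sp = { .s = 0 };

	reader_unlock(&sp);		/* common case: no flags set */
	sp.b.need_mb = 1;
	reader_unlock(&sp);		/* updater has requested a barrier */
	return 0;
}

This is why the unlock hunk below tests t->trc_reader_special.s rather than
each flag in turn: the common case costs exactly one READ_ONCE().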
Diffstat (limited to 'include/linux/rcupdate_trace.h')
-rw-r--r--  include/linux/rcupdate_trace.h | 11 +++++++----
1 files changed, 7 insertions, 4 deletions
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index ed97e10817bd..c42b365ca176 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -31,7 +31,7 @@ static inline int rcu_read_lock_trace_held(void)
 
 #ifdef CONFIG_TASKS_TRACE_RCU
 
-void rcu_read_unlock_trace_special(struct task_struct *t);
+void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);
 
 /**
  * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
@@ -50,6 +50,8 @@ static inline void rcu_read_lock_trace(void)
 	struct task_struct *t = current;
 
 	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
+	if (t->trc_reader_special.b.need_mb)
+		smp_mb(); // Pairs with update-side barriers
 	rcu_lock_acquire(&rcu_trace_lock_map);
 }
 
@@ -69,10 +71,11 @@ static inline void rcu_read_unlock_trace(void)
 
 	rcu_lock_release(&rcu_trace_lock_map);
 	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
-	WRITE_ONCE(t->trc_reader_nesting, nesting);
-	if (likely(!READ_ONCE(t->trc_reader_need_end)) || nesting)
+	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
+		WRITE_ONCE(t->trc_reader_nesting, nesting);
 		return; // We assume shallow reader nesting.
-	rcu_read_unlock_trace_special(t);
+	}
+	rcu_read_unlock_trace_special(t, nesting);
 }
 
 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
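[Usage sketch, not part of the patch.]  A reader of RCU Tasks Trace protected
data brackets its dereferences with rcu_read_lock_trace()/rcu_read_unlock_trace(),
and the update side waits for readers with synchronize_rcu_tasks_trace() (or
defers with call_rcu_tasks_trace(), declared just above).  The kernel-context
sketch below assumes a kernel build; struct my_hook, active_hook,
run_hook_if_set(), and remove_hook() are hypothetical names invented for the
example.

/*
 * Illustrative sketch only.  Kernel context assumed; my_hook, active_hook,
 * run_hook_if_set() and remove_hook() are made-up names, not kernel APIs.
 */
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/slab.h>

struct my_hook {
	void (*handler)(void);
};

static struct my_hook __rcu *active_hook;

static void run_hook_if_set(void)
{
	struct my_hook *h;

	rcu_read_lock_trace();		/* mark this task as a tasks-trace reader */
	h = rcu_dereference_check(active_hook, rcu_read_lock_trace_held());
	if (h)
		h->handler();
	rcu_read_unlock_trace();	/* fast path unless the updater flagged this task */
}

static void remove_hook(void)
{
	struct my_hook *old = rcu_dereference_protected(active_hook, 1);

	RCU_INIT_POINTER(active_hook, NULL);
	synchronize_rcu_tasks_trace();	/* wait for all rcu_read_lock_trace() readers */
	kfree(old);
}

For such a reader, the effect of this commit is that rcu_read_unlock_trace()
stays a couple of loads and a store unless the grace-period machinery has set
a bit in ->trc_reader_special.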