/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
 *
 * Copyright (C) 2020 Paul E. McKenney.
 */

#ifndef __LINUX_RCUPDATE_TRACE_H
#define __LINUX_RCUPDATE_TRACE_H

#include <linux/sched.h>
#include <linux/rcupdate.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC

extern struct lockdep_map rcu_trace_lock_map;

static inline int rcu_read_lock_trace_held(void)
{
	return lock_is_held(&rcu_trace_lock_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int rcu_read_lock_trace_held(void)
{
	return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
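
/*
 * Usage sketch (hypothetical code, not part of this header): a typical
 * consumer of rcu_read_lock_trace_held() passes it as the lockdep
 * condition when dereferencing a trace-RCU-protected pointer.  The
 * structure, field, and function names below are assumptions for
 * illustration only.
 *
 *	struct foo {
 *		struct bar __rcu *barp;
 *	};
 *
 *	static struct bar *foo_get_bar(struct foo *fp)
 *	{
 *		// Complain via lockdep unless within a trace-RCU reader.
 *		return rcu_dereference_check(fp->barp,
 *					     rcu_read_lock_trace_held());
 *	}
 */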

#ifdef CONFIG_TASKS_TRACE_RCU

void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);

/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections.  Similarly, if call_rcu_tasks_trace() is invoked on one
 * task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for rcu_read_lock().
 */
static inline void rcu_read_lock_trace(void)
{
	struct task_struct *t = current;

	WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
	barrier(); // Record nesting-count increment before the critical section.
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers
	rcu_lock_acquire(&rcu_trace_lock_map);
}
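
/*
 * Reader-side sketch (hypothetical code, not part of this header):
 * accesses to trace-RCU-protected data are bracketed by
 * rcu_read_lock_trace() and rcu_read_unlock_trace(), much like ordinary
 * RCU readers.  The global pointer "gp" and the helper below are
 * assumptions for illustration only.
 *
 *	rcu_read_lock_trace();
 *	p = rcu_dereference_check(gp, rcu_read_lock_trace_held());
 *	if (p)
 *		do_something_with(p);	// Hypothetical helper.
 *	rcu_read_unlock_trace();
 */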

/**
 * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
 *
 * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
 * allowed.  Invoking rcu_read_unlock_trace() when there is no matching
 * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 */
static inline void rcu_read_unlock_trace(void)
{
	int nesting;
	struct task_struct *t = current;

	rcu_lock_release(&rcu_trace_lock_map);
	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
	barrier(); // Critical section before disabling.
	// Disable IPI-based setting of .need_qs.
	WRITE_ONCE(t->trc_reader_nesting, INT_MIN);
	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);
		return;  // We assume shallow reader nesting.
	}
	rcu_read_unlock_trace_special(t, nesting);
}

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
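
/*
 * Update-side sketch (hypothetical code, not part of this header): an
 * updater unpublishes an element and then either blocks in
 * synchronize_rcu_tasks_trace() until all pre-existing trace-RCU readers
 * have finished, or defers the free via call_rcu_tasks_trace().  The
 * global pointer, rcu_head field, and callback below are assumptions.
 *
 *	old = xchg(&gp, NULL);			// Unpublish the element.
 *	if (old)
 *		call_rcu_tasks_trace(&old->rh, free_old_cb);
 *	...
 *	rcu_barrier_tasks_trace();		// E.g., before module unload,
 *						// wait for pending callbacks.
 */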
#else
/*
 * The BPF JIT forms these addresses even when it doesn't call these
 * functions, so provide definitions that result in runtime errors.
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

#endif /* __LINUX_RCUPDATE_TRACE_H */