/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>

/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
#define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)
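
/*
 * For example, with RCU_SEQ_CTR_SHIFT == 2 the low two bits of a
 * sequence number hold update-side state and the upper bits count
 * grace periods: the value 0x9 (binary 10 01) encodes counter 2 with
 * state 1 (grace period in progress), while 0x8 encodes counter 2 in
 * the idle state.
 */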

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}
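
/*
 * Worked example: starting from a sequence value of 0x8 (counter 2,
 * idle), rcu_seq_start() produces 0x9 (counter 2, state 1), and a
 * subsequent rcu_seq_end() produces (0x9 | 0x3) + 1 == 0xc (counter 3,
 * idle).  Each start/end pair therefore advances the counter portion
 * by exactly one, regardless of the starting state bits.
 */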

/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time.  This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}
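
/*
 * Worked example: if the current sequence value is 0x5 (counter 1,
 * state 1, so a grace period is in flight that might not cover the
 * caller's recent updates), rcu_seq_snap() returns (0x5 + 7) & ~0x3 ==
 * 0xc (counter 3, idle): the in-flight grace period completes when the
 * counter reaches 2, and the one after it, ending at counter 3, is the
 * first guaranteed to be full.  If instead the value is already idle,
 * say 0x8, the result is likewise (0x8 + 7) & ~0x3 == 0xc, exactly one
 * full grace period later.
 */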

/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}
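
/*
 * A hypothetical polling use of the snapshot/done pair above (a sketch
 * only; do_my_updates() is a placeholder, not a real kernel function):
 *
 *	s = rcu_seq_snap(&sq);
 *	do_my_updates();
 *	while (!rcu_seq_done(&sq, s))
 *		schedule_timeout_uninterruptible(1);
 *
 * At this point all callbacks registered before the snapshot are safe
 * to invoke.
 */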

/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
	return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
			    new);
}
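
/*
 * Worked example for the two predicates above: with old == 0x5
 * (counter 1, state 1) and new == 0x9 (counter 2, state 1),
 * rcu_seq_completed_gp() compares 0x5 < 0x8 and reports true (the
 * grace period in flight at collection time has since ended), while
 * rcu_seq_new_gp() compares 0x8 < 0x9 and also reports true (a newer
 * grace period has since started).
 */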

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace-period sequence numbers?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
	unsigned long rnd_diff;

	if (old == new)
		return 0;
	/*
	 * Compute the number of grace periods (still shifted up), plus
	 * one if either of new and old is not an exact grace period.
	 */
	rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
		   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
		   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
	if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
		return 1; /* Definitely no grace period has elapsed. */
	return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}
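
/*
 * Worked example: with old == 0x5 (counter 1, state 1) and new == 0xd
 * (counter 3, state 1), rnd_diff == 0xc - 0x8 + 1 == 5, which exceeds
 * RCU_SEQ_STATE_MASK, so the function returns ((5 - 4) >> 2) + 2 == 2:
 * roughly two full grace periods separate the two collections.
 */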

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API.  These are in kernel/rcu/rcu.h because they are used
 * by all RCU implementations.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY	0
# define STATE_RCU_HEAD_QUEUED	1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		trace_rcu_invoke_kfree_callback(rn, head, offset);
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	} else {
		trace_rcu_invoke_callback(rn, head);
		f = head->func;
		WRITE_ONCE(head->func, (rcu_callback_t)0L);
		f(head);
		rcu_lock_release(&rcu_callback_map);
		return false;
	}
}
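
/*
 * The offset test above works because kfree_rcu() stores the offset of
 * the rcu_head within its enclosing structure in ->func instead of a
 * real function pointer, and __is_kfree_rcu_offset() treats any
 * sufficiently small value as such an offset.  For example, a structure
 * whose rcu_head sits 16 bytes in would have ->func equal to 16, and
 * kfree((void *)head - 16) then frees the entire enclosing structure.
 */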

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_ftrace_dump;
extern int rcu_cpu_stall_suppress;
extern int rcu_cpu_stall_timeout;
int rcu_jiffies_till_stall_check(void);

#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)	tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	for (i = 0; i < RCU_NUM_LVLS; i++)
		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
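
/*
 * Worked example of the balancing branch: with nr_cpu_ids == 6 and a
 * two-level tree described by levelcnt == {1, 2}, the leaf pass gives
 * levelspread[1] == (6 + 2 - 1) / 2 == 3 (each leaf covers up to three
 * CPUs), and the root pass gives levelspread[0] == (2 + 1 - 1) / 1 == 2
 * (the root fans out over both leaves).
 */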

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the {s,}rcu_node structures for the
 * specified state structure (for SRCU) or the only rcu_state structure
 * (for RCU).
 */
#define srcu_for_each_node_breadth_first(sp, rnp) \
	for ((rnp) = &(sp)->node[0]; \
	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
#define rcu_for_each_node_breadth_first(rnp) \
	srcu_for_each_node_breadth_first(&rcu_state, rnp)

/*
 * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
 * Note that if there is a singleton rcu_node tree with but one rcu_node
 * structure, this loop -will- visit the rcu_node structure.  It is still
 * a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rnp) \
	for ((rnp) = rcu_first_leaf_node(); \
	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
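
/*
 * For example, a leaf with ->grplo == 0 and ->grphi == 15 iterated with
 * mask == 0x5 visits CPUs 0 and 2: find_next_bit() first lands on bit 0,
 * the next probe starts at bit 1 and lands on bit 2, and the probe after
 * that runs off the end of the mask, yielding a CPU number greater than
 * ->grphi and terminating the loop.
 */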

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on
 * ->lock.
 */
#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
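
/*
 * A hypothetical use of the wrappers above (a sketch only; rnp and
 * flags stand in for a caller's locals):
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	raw_lockdep_assert_held_rcu_node(rnp);
 *	... update rnp fields under the lock ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 *
 * The smp_mb__after_unlock_lock() in the acquire path makes this
 * acquisition, combined with a prior release of any rcu_node's lock,
 * behave as a full memory barrier.
 */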

#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */

#ifdef CONFIG_SRCU
void srcu_init(void);
#else /* #ifdef CONFIG_SRCU */
static inline void srcu_init(void) { }
#endif /* #else #ifdef CONFIG_SRCU */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE	0
#define RCU_SCHEDULER_INIT	1
#define RCU_SCHEDULER_RUNNING	2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_TASKS_FLAVOR,
	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
{
	*flags = 0;
	*gp_seq = 0;
}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
static inline void rcu_fwd_progress_check(unsigned long j) { }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_fwd_progress_check(unsigned long j);
void rcu_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
void rcu_bind_current_to_nocb(void);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#endif /* __LINUX_RCU_H */