// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU segmented callback lists, function definitions
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "rcu_segcblist.h"

/* Initialize simple callback list. */
void rcu_cblist_init(struct rcu_cblist *rclp)
{
	rclp->head = NULL;
	rclp->tail = &rclp->head;
	rclp->len = 0;
}

/*
 * Enqueue an rcu_head structure onto the specified callback list.
 */
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp)
{
	*rclp->tail = rhp;
	rclp->tail = &rhp->next;
	WRITE_ONCE(rclp->len, rclp->len + 1);
}

/*
 * Flush the second rcu_cblist structure onto the first one, obliterating
 * any contents of the first.  If rhp is non-NULL, enqueue it as the sole
 * element of the second rcu_cblist structure, but ensuring that the second
 * rcu_cblist structure, if initially non-empty, always appears non-empty
 * throughout the process.  If rhp is NULL, the second rcu_cblist structure
 * is instead initialized to empty.
 */
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
			      struct rcu_cblist *srclp,
			      struct rcu_head *rhp)
{
	drclp->head = srclp->head;
	if (drclp->head)
		drclp->tail = srclp->tail;
	else
		drclp->tail = &drclp->head;
	drclp->len = srclp->len;
	if (!rhp) {
		rcu_cblist_init(srclp);
	} else {
		rhp->next = NULL;
		srclp->head = rhp;
		srclp->tail = &rhp->next;
		WRITE_ONCE(srclp->len, 1);
	}
}
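
/*
 * For example, given a destination list whose contents do not matter,
 * a source list containing A and B, and a non-NULL rhp C, this results
 * in the following state:
 *
 *	Before:  drclp: ?		srclp: A -> B
 *	After:   drclp: A -> B		srclp: C
 *
 * Because ->len is updated directly from its old value to 1, lockless
 * samplers such as rcu_barrier() never see srclp as empty.
 */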

/*
 * Dequeue the oldest rcu_head structure from the specified callback
 * list.
 */
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp)
{
	struct rcu_head *rhp;

	rhp = rclp->head;
	if (!rhp)
		return NULL;
	rclp->len--;
	rclp->head = rhp->next;
	if (!rclp->head)
		rclp->tail = &rclp->head;
	return rhp;
}

/* Set the length of an rcu_segcblist structure. */
static void rcu_segcblist_set_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
	atomic_long_set(&rsclp->len, v);
#else
	WRITE_ONCE(rsclp->len, v);
#endif
}

/* Get the length of a segment of the rcu_segcblist structure. */
static long rcu_segcblist_get_seglen(struct rcu_segcblist *rsclp, int seg)
{
	return READ_ONCE(rsclp->seglen[seg]);
}

/* Return number of callbacks in segmented callback list by summing seglen. */
long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp)
{
	long len = 0;
	int i;

	for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
		len += rcu_segcblist_get_seglen(rsclp, i);

	return len;
}

/* Set the length of a segment of the rcu_segcblist structure. */
static void rcu_segcblist_set_seglen(struct rcu_segcblist *rsclp, int seg, long v)
{
	WRITE_ONCE(rsclp->seglen[seg], v);
}

/* Increase the numeric length of a segment by a specified amount. */
static void rcu_segcblist_add_seglen(struct rcu_segcblist *rsclp, int seg, long v)
{
	WRITE_ONCE(rsclp->seglen[seg], rsclp->seglen[seg] + v);
}

/* Move the "from" segment's length to the "to" segment. */
static void rcu_segcblist_move_seglen(struct rcu_segcblist *rsclp, int from, int to)
{
	long len;

	if (from == to)
		return;

	len = rcu_segcblist_get_seglen(rsclp, from);
	if (!len)
		return;

	rcu_segcblist_add_seglen(rsclp, to, len);
	rcu_segcblist_set_seglen(rsclp, from, 0);
}

/* Increment segment's length. */
static void rcu_segcblist_inc_seglen(struct rcu_segcblist *rsclp, int seg)
{
	rcu_segcblist_add_seglen(rsclp, seg, 1);
}

/*
 * Increase the numeric length of an rcu_segcblist structure by the
 * specified amount, which can be negative.  This can cause the ->len
 * field to disagree with the actual number of callbacks on the structure.
 * This increase is fully ordered with respect to the caller's accesses
 * both before and after.
 *
 * So why on earth is a memory barrier required both before and after
 * the update to the ->len field???
 *
 * The reason is that rcu_barrier() locklessly samples each CPU's ->len
 * field, and if a given CPU's field is zero, avoids IPIing that CPU.
 * This can of course race with both queuing and invoking of callbacks.
 * Failing to correctly handle either of these races could result in
 * rcu_barrier() failing to IPI a CPU that actually had callbacks queued
 * which rcu_barrier() was obligated to wait on.  And if rcu_barrier()
 * failed to wait on such a callback, unloading certain kernel modules
 * would result in calls to functions whose code was no longer present in
 * the kernel, for but one example.
 *
 * Therefore, ->len transitions from 1->0 and 0->1 have to be carefully
 * ordered with respect to both list modifications and the rcu_barrier().
 *
 * The queuing case is CASE 1 and the invoking case is CASE 2.
 *
 * CASE 1: Suppose that CPU 0 has no callbacks queued, but invokes
 * call_rcu() just as CPU 1 invokes rcu_barrier().  CPU 0's ->len field
 * will transition from 0->1, which is one of the transitions that must
 * be handled carefully.  Without the full memory barriers after the ->len
 * update and at the beginning of rcu_barrier(), the following could happen:
 *
 *	CPU 0				CPU 1
 *
 *	call_rcu().
 *					rcu_barrier() sees ->len as 0.
 *	set ->len = 1.
 *					rcu_barrier() does nothing.
 *					module is unloaded.
 *	callback invokes unloaded function!
 *
 * With the full barriers, any case where rcu_barrier() sees ->len as 0 will
 * have unambiguously preceded the return from the racing call_rcu(), which
 * means that this call_rcu() invocation is OK to not wait on.  After all,
 * you are supposed to make sure that any problematic call_rcu() invocations
 * happen before the rcu_barrier().
 *
 * CASE 2: Suppose that CPU 0 is invoking its last callback just as
 * CPU 1 invokes rcu_barrier().  CPU 0's ->len field will transition from
 * 1->0, which is one of the transitions that must be handled carefully.
 * Without the full memory barriers before the ->len update and at the
 * end of rcu_barrier(), the following could happen:
 *
 *	CPU 0				CPU 1
 *
 *	start invoking last callback
 *	set ->len = 0 (reordered)
 *					rcu_barrier() sees ->len as 0
 *					rcu_barrier() does nothing.
 *					module is unloaded
 *	callback executing after unloaded!
 *
 * With the full barriers, any case where rcu_barrier() sees ->len as 0
 * will be fully ordered after the completion of the callback function,
 * so that the module unloading operation is completely safe.
 */
void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v)
{
#ifdef CONFIG_RCU_NOCB_CPU
	smp_mb__before_atomic(); // Read header comment above.
	atomic_long_add(v, &rsclp->len);
	smp_mb__after_atomic(); // Read header comment above.
#else
	smp_mb(); // Read header comment above.
	WRITE_ONCE(rsclp->len, rsclp->len + v);
	smp_mb(); // Read header comment above.
#endif
}

/*
 * Increase the numeric length of an rcu_segcblist structure by one.
 * This can cause the ->len field to disagree with the actual number of
 * callbacks on the structure.  This increase is fully ordered with
 * respect to the caller's accesses both before and after.
 */
void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp)
{
	rcu_segcblist_add_len(rsclp, 1);
}

/*
 * Initialize an rcu_segcblist structure.
 */
void rcu_segcblist_init(struct rcu_segcblist *rsclp)
{
	int i;

	BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq));
	BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq));
	rsclp->head = NULL;
	for (i = 0; i < RCU_CBLIST_NSEGS; i++) {
		rsclp->tails[i] = &rsclp->head;
		rcu_segcblist_set_seglen(rsclp, i, 0);
	}
	rcu_segcblist_set_len(rsclp, 0);
	rcu_segcblist_set_flags(rsclp, SEGCBLIST_ENABLED);
}
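
/*
 * The newly initialized structure has a single ->head list carved into
 * four segments:
 *
 *	RCU_DONE_TAIL:		callbacks whose grace period has elapsed
 *	RCU_WAIT_TAIL:		callbacks waiting for the current grace period
 *	RCU_NEXT_READY_TAIL:	callbacks waiting for the next grace period
 *	RCU_NEXT_TAIL:		callbacks not yet associated with a grace period
 *
 * Each ->tails[i] references the ->next pointer of the last callback in
 * segment i, so an empty segment's tail pointer equals that of the
 * previous segment.  Immediately after initialization, all four tail
 * pointers therefore reference ->head itself.
 */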

/*
 * Disable the specified rcu_segcblist structure, so that callbacks can
 * no longer be posted to it.  This structure must be empty.
 */
void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
{
	WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
	WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
	rcu_segcblist_clear_flags(rsclp, SEGCBLIST_ENABLED);
}

/*
 * Mark the specified rcu_segcblist structure as offloaded or not,
 * as specified by the offload argument.
 */
void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload)
{
	if (offload) {
		rcu_segcblist_clear_flags(rsclp, SEGCBLIST_SOFTIRQ_ONLY);
		rcu_segcblist_set_flags(rsclp, SEGCBLIST_OFFLOADED);
	} else {
		rcu_segcblist_clear_flags(rsclp, SEGCBLIST_OFFLOADED);
	}
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are ready to be invoked?
 */
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_is_enabled(rsclp) &&
	       &rsclp->head != READ_ONCE(rsclp->tails[RCU_DONE_TAIL]);
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are still pending, that is, not yet ready to be invoked?
 */
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_is_enabled(rsclp) &&
	       !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL);
}

/*
 * Return a pointer to the first callback in the specified rcu_segcblist
 * structure.  This is useful for diagnostics.
 */
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp)
{
	if (rcu_segcblist_is_enabled(rsclp))
		return rsclp->head;
	return NULL;
}

/*
 * Return a pointer to the first pending callback in the specified
 * rcu_segcblist structure.  This is useful just after posting a given
 * callback -- if that callback is the first pending callback, then
 * you cannot rely on someone else having already started up the required
 * grace period.
 */
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp)
{
	if (rcu_segcblist_is_enabled(rsclp))
		return *rsclp->tails[RCU_DONE_TAIL];
	return NULL;
}

/*
 * Return false if there are no CBs awaiting grace periods; otherwise,
 * return true and store the nearest waited-upon grace period into *lp.
 */
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp)
{
	if (!rcu_segcblist_pend_cbs(rsclp))
		return false;
	*lp = rsclp->gp_seq[RCU_WAIT_TAIL];
	return true;
}

/*
 * Enqueue the specified callback onto the specified rcu_segcblist
 * structure, updating accounting as needed.  Note that the ->len
 * field may be accessed locklessly, hence the WRITE_ONCE().
 * The ->len field is used by rcu_barrier() and friends to determine
 * if it must post a callback on this structure, and it is OK
 * for rcu_barrier() to sometimes post callbacks needlessly, but
 * absolutely not OK for it to ever miss posting a callback.
 */
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp)
{
	rcu_segcblist_inc_len(rsclp);
	rcu_segcblist_inc_seglen(rsclp, RCU_NEXT_TAIL);
	rhp->next = NULL;
	WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rhp);
	WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], &rhp->next);
}
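
/*
 * For example, callbacks posted via call_rcu() are enqueued here, so
 * each newly posted callback starts life in the RCU_NEXT_TAIL segment
 * and is associated with a grace period only later, by
 * rcu_segcblist_accelerate().
 */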

/*
 * Entrain the specified callback onto the specified rcu_segcblist at
 * the end of the last non-empty segment.  If the entire rcu_segcblist
 * is empty, make no change, but return false.
 *
 * This is intended for use by rcu_barrier()-like primitives, -not-
 * for normal grace-period use.  IMPORTANT:  The callback you enqueue
 * will wait for all prior callbacks, NOT necessarily for a grace
 * period.  You have been warned.
 */
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp)
{
	int i;

	if (rcu_segcblist_n_cbs(rsclp) == 0)
		return false;
	rcu_segcblist_inc_len(rsclp);
	smp_mb(); /* Ensure counts are updated before callback is entrained. */
	rhp->next = NULL;
	for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)
		if (rsclp->tails[i] != rsclp->tails[i - 1])
			break;
	rcu_segcblist_inc_seglen(rsclp, i);
	WRITE_ONCE(*rsclp->tails[i], rhp);
	for (; i <= RCU_NEXT_TAIL; i++)
		WRITE_ONCE(rsclp->tails[i], &rhp->next);
	return true;
}
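
/*
 * For example, if the DONE segment holds one callback, the WAIT segment
 * holds two, and the remaining segments are empty, the entrained
 * callback is appended to the WAIT segment, and so is invoked only
 * after all three earlier callbacks.  This is how rcu_barrier()-like
 * primitives wait for all previously queued callbacks rather than for
 * a grace period.
 */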

/*
 * Extract only those callbacks ready to be invoked from the specified
 * rcu_segcblist structure and place them in the specified rcu_cblist
 * structure.
 */
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp)
{
	int i;

	if (!rcu_segcblist_ready_cbs(rsclp))
		return; /* Nothing to do. */
	rclp->len = rcu_segcblist_get_seglen(rsclp, RCU_DONE_TAIL);
	*rclp->tail = rsclp->head;
	WRITE_ONCE(rsclp->head, *rsclp->tails[RCU_DONE_TAIL]);
	WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
	rclp->tail = rsclp->tails[RCU_DONE_TAIL];
	for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
		if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL])
			WRITE_ONCE(rsclp->tails[i], &rsclp->head);
	rcu_segcblist_set_seglen(rsclp, RCU_DONE_TAIL, 0);
}

/*
 * Extract only those callbacks still pending (not yet ready to be
 * invoked) from the specified rcu_segcblist structure and place them in
 * the specified rcu_cblist structure.  Note that this loses information
 * about any callbacks that might have been partway done waiting for
 * their grace period.  Too bad!  They will have to start over.
 */
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp)
{
	int i;

	if (!rcu_segcblist_pend_cbs(rsclp))
		return; /* Nothing to do. */
	rclp->len = 0;
	*rclp->tail = *rsclp->tails[RCU_DONE_TAIL];
	rclp->tail = rsclp->tails[RCU_NEXT_TAIL];
	WRITE_ONCE(*rsclp->tails[RCU_DONE_TAIL], NULL);
	for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++) {
		rclp->len += rcu_segcblist_get_seglen(rsclp, i);
		WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_DONE_TAIL]);
		rcu_segcblist_set_seglen(rsclp, i, 0);
	}
}

/*
 * Insert counts from the specified rcu_cblist structure in the
 * specified rcu_segcblist structure.
 */
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp)
{
	rcu_segcblist_add_len(rsclp, rclp->len);
}

/*
 * Move callbacks from the specified rcu_cblist to the beginning of the
 * done-callbacks segment of the specified rcu_segcblist.
 */
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp)
{
	int i;

	if (!rclp->head)
		return; /* No callbacks to move. */
	rcu_segcblist_add_seglen(rsclp, RCU_DONE_TAIL, rclp->len);
	*rclp->tail = rsclp->head;
	WRITE_ONCE(rsclp->head, rclp->head);
	for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
		if (&rsclp->head == rsclp->tails[i])
			WRITE_ONCE(rsclp->tails[i], rclp->tail);
		else
			break;
	rclp->head = NULL;
	rclp->tail = &rclp->head;
}

/*
 * Move callbacks from the specified rcu_cblist to the end of the
 * new-callbacks segment of the specified rcu_segcblist.
 */
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp)
{
	if (!rclp->head)
		return; /* Nothing to do. */

	rcu_segcblist_add_seglen(rsclp, RCU_NEXT_TAIL, rclp->len);
	WRITE_ONCE(*rsclp->tails[RCU_NEXT_TAIL], rclp->head);
	WRITE_ONCE(rsclp->tails[RCU_NEXT_TAIL], rclp->tail);
}

/*
 * Advance the callbacks in the specified rcu_segcblist structure based
 * on the current value passed in for the grace-period counter.
 */
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq)
{
	int i, j;

	WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
	if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
		return;

	/*
	 * Find all callbacks whose ->gp_seq numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL segment.
	 */
	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
		if (ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
			break;
		WRITE_ONCE(rsclp->tails[RCU_DONE_TAIL], rsclp->tails[i]);
		rcu_segcblist_move_seglen(rsclp, i, RCU_DONE_TAIL);
	}

	/* If no callbacks moved, nothing more need be done. */
	if (i == RCU_WAIT_TAIL)
		return;

	/* Clean up tail pointers that might have been misordered above. */
	for (j = RCU_WAIT_TAIL; j < i; j++)
		WRITE_ONCE(rsclp->tails[j], rsclp->tails[RCU_DONE_TAIL]);

	/*
	 * Callbacks moved, so clean up the misordered ->tails[] pointers
	 * that now point into the middle of the list of ready-to-invoke
	 * callbacks.  The overall effect is to copy down the later pointers
	 * into the gap that was created by the now-ready segments.
	 */
	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
		if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL])
			break; /* No more callbacks. */
		WRITE_ONCE(rsclp->tails[j], rsclp->tails[i]);
		rcu_segcblist_move_seglen(rsclp, i, j);
		rsclp->gp_seq[j] = rsclp->gp_seq[i];
	}
}
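
/*
 * For example, if "seq" indicates that the WAIT segment's grace period
 * has completed but the NEXT_READY segment's has not, the WAIT
 * callbacks are merged into DONE, and the NEXT_READY callbacks (along
 * with their ->gp_seq value) are copied down into the vacated WAIT
 * position:
 *
 *	Before:  DONE | WAIT | NEXT_READY | NEXT
 *	After:   DONE+WAIT | NEXT_READY | (empty) | NEXT
 *
 * Callbacks in the NEXT segment are never made ready here because they
 * have not yet been assigned a grace period.
 */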

/*
 * "Accelerate" callbacks based on more-accurate grace-period information.
 * The reason for this is that RCU does not synchronize the beginnings and
 * ends of grace periods, and that callbacks are posted locally.  This in
 * turn means that the callbacks must be labelled conservatively early
 * on, as getting exact information would degrade both performance and
 * scalability.  When more accurate grace-period information becomes
 * available, previously posted callbacks can be "accelerated", marking
 * them to complete at the end of the earlier grace period.
 *
 * This function operates on an rcu_segcblist structure, and also the
 * grace-period sequence number seq at which new callbacks would become
 * ready to invoke.  Returns true if there are callbacks that won't be
 * ready to invoke until seq, false otherwise.
 */
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq)
{
	int i, j;

	WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
	if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
		return false;

	/*
	 * Find the segment preceding the oldest segment of callbacks
	 * whose ->gp_seq[] completion is at or after that passed in via
	 * "seq", skipping any empty segments.  This oldest segment, along
	 * with any later segments, can be merged in with any newly arrived
	 * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq"
	 * as their ->gp_seq[] grace-period completion sequence number.
	 */
	for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--)
		if (rsclp->tails[i] != rsclp->tails[i - 1] &&
		    ULONG_CMP_LT(rsclp->gp_seq[i], seq))
			break;

	/*
	 * If all the segments contain callbacks that correspond to
	 * earlier grace-period sequence numbers than "seq", leave.
	 * Assuming that the rcu_segcblist structure has enough
	 * segments in its arrays, this can only happen if some of
	 * the non-done segments contain callbacks that really are
	 * ready to invoke.  This situation will get straightened
	 * out by the next call to rcu_segcblist_advance().
	 *
	 * Also advance to the oldest segment of callbacks whose
	 * ->gp_seq[] completion is at or after that passed in via "seq",
	 * skipping any empty segments.
	 *
	 * Note that segment "i" (and any lower-numbered segments
	 * containing older callbacks) will be unaffected, and their
	 * grace-period numbers remain unchanged.  For example, if i ==
	 * WAIT_TAIL, then neither WAIT_TAIL nor DONE_TAIL will be touched.
	 * Instead, the CBs in NEXT_TAIL will be merged with those in
	 * NEXT_READY_TAIL and the grace-period number of NEXT_READY_TAIL
	 * would be updated.  NEXT_TAIL would then be empty.
	 */
	if (rcu_segcblist_restempty(rsclp, i) || ++i >= RCU_NEXT_TAIL)
		return false;

	/* Accounting: everything below i is about to get merged into i. */
	for (j = i + 1; j <= RCU_NEXT_TAIL; j++)
		rcu_segcblist_move_seglen(rsclp, j, i);

	/*
	 * Merge all later callbacks, including newly arrived callbacks,
	 * into the segment located by the for-loop above.  Assign "seq"
	 * as the ->gp_seq[] value in order to correctly handle the case
	 * where there were no pending callbacks in the rcu_segcblist
	 * structure other than in the RCU_NEXT_TAIL segment.
	 */
	for (; i < RCU_NEXT_TAIL; i++) {
		WRITE_ONCE(rsclp->tails[i], rsclp->tails[RCU_NEXT_TAIL]);
		rsclp->gp_seq[i] = seq;
	}
	return true;
}
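
/*
 * For example, if the only not-yet-done callbacks are in the
 * RCU_NEXT_TAIL segment, all of them are assigned "seq" and moved down
 * into the WAIT segment:
 *
 *	Before:  DONE | (empty) | (empty) | NEXT
 *	After:   DONE | NEXT (->gp_seq = seq) | (empty) | (empty)
 *
 * A subsequent rcu_segcblist_advance() with a grace-period counter at
 * or beyond "seq" will then move these callbacks into DONE.
 */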

/*
 * Merge the source rcu_segcblist structure into the destination
 * rcu_segcblist structure, then initialize the source.  Any pending
 * callbacks from the source get to start over.  It is best to
 * advance and accelerate both the destination and the source
 * before merging.
 */
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp)
{
	struct rcu_cblist donecbs;
	struct rcu_cblist pendcbs;

	lockdep_assert_cpus_held();

	rcu_cblist_init(&donecbs);
	rcu_cblist_init(&pendcbs);

	rcu_segcblist_extract_done_cbs(src_rsclp, &donecbs);
	rcu_segcblist_extract_pend_cbs(src_rsclp, &pendcbs);

| 623 | /* |
| 624 | * No need smp_mb() before setting length to 0, because CPU hotplug |
| 625 | * lock excludes rcu_barrier. |
| 626 | */ |
| 627 | rcu_segcblist_set_len(src_rsclp, 0); |
| 628 | |
Paul E. McKenney | f2dbe4a | 2017-06-27 07:44:06 -0700 | [diff] [blame] | 629 | rcu_segcblist_insert_count(dst_rsclp, &donecbs); |
Joel Fernandes (Google) | ae5c234 | 2020-09-23 11:22:09 -0400 | [diff] [blame] | 630 | rcu_segcblist_insert_count(dst_rsclp, &pendcbs); |
Paul E. McKenney | f2dbe4a | 2017-06-27 07:44:06 -0700 | [diff] [blame] | 631 | rcu_segcblist_insert_done_cbs(dst_rsclp, &donecbs); |
| 632 | rcu_segcblist_insert_pend_cbs(dst_rsclp, &pendcbs); |
Joel Fernandes (Google) | ae5c234 | 2020-09-23 11:22:09 -0400 | [diff] [blame] | 633 | |
Paul E. McKenney | f2dbe4a | 2017-06-27 07:44:06 -0700 | [diff] [blame] | 634 | rcu_segcblist_init(src_rsclp); |
| 635 | } |
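
/*
 * For example, the CPU-hotplug offline path uses this function to
 * migrate an outgoing CPU's remaining callbacks onto a surviving CPU's
 * list, which is why lockdep verifies above that the CPU-hotplug lock
 * is held.
 */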