/*
 * RCU segmented callback lists
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/rcu_segcblist.h>

/* Initialize simple callback list. */
static inline void rcu_cblist_init(struct rcu_cblist *rclp)
{
        rclp->head = NULL;
        rclp->tail = &rclp->head;
        rclp->len = 0;
        rclp->len_lazy = 0;
}

/* Is simple callback list empty? */
static inline bool rcu_cblist_empty(struct rcu_cblist *rclp)
{
        return !rclp->head;
}

/* Return number of callbacks in simple callback list. */
static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
{
        return rclp->len;
}

/* Return number of lazy callbacks in simple callback list. */
static inline long rcu_cblist_n_lazy_cbs(struct rcu_cblist *rclp)
{
        return rclp->len_lazy;
}

/*
 * Debug function to actually count the number of callbacks.
 * If the number exceeds the limit specified, return -1.
 */
static inline long rcu_cblist_count_cbs(struct rcu_cblist *rclp, long lim)
{
        int cnt = 0;
        struct rcu_head **rhpp = &rclp->head;

        for (;;) {
                if (!*rhpp)
                        return cnt;
                if (++cnt > lim)
                        return -1;
                rhpp = &(*rhpp)->next;
        }
}
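
/*
 * Illustrative sketch (not part of this header): one way a caller might
 * use the debug counter above, cross-checking the cached ->len against
 * the actual list.  "rcl" is an assumed caller-owned struct rcu_cblist
 * and 10000 an arbitrary example limit.
 *
 *      long n = rcu_cblist_count_cbs(&rcl, 10000);
 *
 *      WARN_ON_ONCE(n >= 0 && n != rcu_cblist_n_cbs(&rcl));
 */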

/*
 * Dequeue the oldest rcu_head structure from the specified callback
 * list.  This function assumes that the callback is non-lazy, but
 * the caller can later invoke rcu_cblist_dequeued_lazy() if it
 * finds otherwise (and if it cares about laziness).  This allows
 * different users to have different ways of determining laziness.
 */
static inline struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp)
{
        struct rcu_head *rhp;

        rhp = rclp->head;
        if (!rhp)
                return NULL;
        rclp->len--;
        rclp->head = rhp->next;
        if (!rclp->head)
                rclp->tail = &rclp->head;
        return rhp;
}
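
/*
 * Illustrative sketch (not part of this header): a minimal drain loop
 * over an assumed caller-owned simple list "rcl", treating every element
 * as a plain function callback.  The kernel's real invocation path also
 * handles kfree_rcu()-style offset callbacks and laziness accounting.
 *
 *      struct rcu_head *rhp;
 *
 *      while ((rhp = rcu_cblist_dequeue(&rcl)) != NULL)
 *              rhp->func(rhp);
 */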

/*
 * Account for the fact that a previously dequeued callback turned out
 * to be marked as lazy.
 */
static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
{
        rclp->len_lazy--;
}

/*
 * Interim function to return rcu_cblist head pointer.  Longer term, the
 * rcu_cblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head *rcu_cblist_head(struct rcu_cblist *rclp)
{
        return rclp->head;
}

/*
 * Interim function to return rcu_cblist tail pointer.  Longer term, the
 * rcu_cblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head **rcu_cblist_tail(struct rcu_cblist *rclp)
{
        WARN_ON_ONCE(rcu_cblist_empty(rclp));
        return rclp->tail;
}

/*
 * Initialize an rcu_segcblist structure.
 */
static inline void rcu_segcblist_init(struct rcu_segcblist *rsclp)
{
        int i;

        BUILD_BUG_ON(RCU_NEXT_TAIL + 1 != ARRAY_SIZE(rsclp->gp_seq));
        BUILD_BUG_ON(ARRAY_SIZE(rsclp->tails) != ARRAY_SIZE(rsclp->gp_seq));
        rsclp->head = NULL;
        for (i = 0; i < RCU_CBLIST_NSEGS; i++)
                rsclp->tails[i] = &rsclp->head;
        rsclp->len = 0;
        rsclp->len_lazy = 0;
}

/*
 * Is the specified rcu_segcblist structure empty?
 *
 * But careful!  The fact that the ->head field is NULL does not
 * necessarily imply that there are no callbacks associated with
 * this structure.  When callbacks are being invoked, they are
 * removed as a group.  If callback invocation must be preempted,
 * the remaining callbacks will be added back to the list.  Either
 * way, the counts are updated later.
 *
 * So it is often the case that rcu_segcblist_n_cbs() should be used
 * instead.
 */
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
        return !rsclp->head;
}

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
        return READ_ONCE(rsclp->len);
}
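
/*
 * Illustrative sketch (not part of this header): as the comment above
 * rcu_segcblist_empty() notes, "is there work?" checks generally want
 * the count rather than the ->head test, because ->head can be NULL
 * while a batch of callbacks is being invoked.
 *
 *      bool have_cbs = rcu_segcblist_n_cbs(rsclp) != 0;
 */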

/* Return number of lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
{
        return rsclp->len_lazy;
}

/* Return number of non-lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
{
        return rsclp->len - rsclp->len_lazy;
}

/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline or callback-offloaded CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
        return !!rsclp->tails[RCU_NEXT_TAIL];
}

/*
 * Disable the specified rcu_segcblist structure, so that callbacks can
 * no longer be posted to it.  This structure must be empty.
 */
static inline void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
{
        WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
        WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
        WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp));
        rsclp->tails[RCU_NEXT_TAIL] = NULL;
}

/*
 * Is the specified segment of the specified rcu_segcblist structure
 * empty of callbacks?
 */
static inline bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg)
{
        if (seg == RCU_DONE_TAIL)
                return &rsclp->head == rsclp->tails[RCU_DONE_TAIL];
        return rsclp->tails[seg - 1] == rsclp->tails[seg];
}

/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks?  (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
        return !*rsclp->tails[seg];
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are ready to be invoked?
 */
static inline bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp)
{
        return rcu_segcblist_is_enabled(rsclp) &&
               &rsclp->head != rsclp->tails[RCU_DONE_TAIL];
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * are still pending, that is, not yet ready to be invoked?
 */
static inline bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp)
{
        return rcu_segcblist_is_enabled(rsclp) &&
               !rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL);
}

/*
 * Dequeue and return the first ready-to-invoke callback.  If there
 * are no ready-to-invoke callbacks, return NULL.  Disables interrupts
 * to avoid interference.  Does not protect from interference from other
 * CPUs or tasks.
 */
static inline struct rcu_head *
rcu_segcblist_dequeue(struct rcu_segcblist *rsclp)
{
        unsigned long flags;
        int i;
        struct rcu_head *rhp;

        local_irq_save(flags);
        if (!rcu_segcblist_ready_cbs(rsclp)) {
                local_irq_restore(flags);
                return NULL;
        }
        rhp = rsclp->head;
        BUG_ON(!rhp);
        rsclp->head = rhp->next;
        for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++) {
                if (rsclp->tails[i] != &rhp->next)
                        break;
                rsclp->tails[i] = &rsclp->head;
        }
        smp_mb(); /* Dequeue before decrement for rcu_barrier(). */
        WRITE_ONCE(rsclp->len, rsclp->len - 1);
        local_irq_restore(flags);
        return rhp;
}
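
/*
 * Illustrative sketch (not part of this header): a minimal loop that
 * drains and invokes the ready-to-invoke callbacks of a per-CPU
 * "rsclp", again assuming plain function callbacks and ignoring the
 * batching, laziness, and kfree_rcu() handling done by the real
 * callback-invocation code.
 *
 *      struct rcu_head *rhp;
 *
 *      while ((rhp = rcu_segcblist_dequeue(rsclp)) != NULL)
 *              rhp->func(rhp);
 */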

/*
 * Account for the fact that a previously dequeued callback turned out
 * to be marked as lazy.
 */
static inline void rcu_segcblist_dequeued_lazy(struct rcu_segcblist *rsclp)
{
        unsigned long flags;

        local_irq_save(flags);
        rsclp->len_lazy--;
        local_irq_restore(flags);
}

/*
 * Return a pointer to the first callback in the specified rcu_segcblist
 * structure.  This is useful for diagnostics.
 */
static inline struct rcu_head *
rcu_segcblist_first_cb(struct rcu_segcblist *rsclp)
{
        if (rcu_segcblist_is_enabled(rsclp))
                return rsclp->head;
        return NULL;
}

/*
 * Return a pointer to the first pending callback in the specified
 * rcu_segcblist structure.  This is useful just after posting a given
 * callback -- if that callback is the first pending callback, then
 * you cannot rely on someone else having already started up the required
 * grace period.
 */
static inline struct rcu_head *
rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp)
{
        if (rcu_segcblist_is_enabled(rsclp))
                return *rsclp->tails[RCU_DONE_TAIL];
        return NULL;
}

/*
 * Does the specified rcu_segcblist structure contain callbacks that
 * have not yet been processed beyond having been posted, that is,
 * does it contain callbacks in its last segment?
 */
static inline bool rcu_segcblist_new_cbs(struct rcu_segcblist *rsclp)
{
        return rcu_segcblist_is_enabled(rsclp) &&
               !rcu_segcblist_restempty(rsclp, RCU_NEXT_READY_TAIL);
}

/*
 * Enqueue the specified callback onto the specified rcu_segcblist
 * structure, updating accounting as needed.  Note that the ->len
 * field may be accessed locklessly, hence the WRITE_ONCE().
 * The ->len field is used by rcu_barrier() and friends to determine
 * if it must post a callback on this structure, and it is OK
 * for rcu_barrier() to sometimes post callbacks needlessly, but
 * absolutely not OK for it to ever miss posting a callback.
 */
static inline void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
                                         struct rcu_head *rhp, bool lazy)
{
        WRITE_ONCE(rsclp->len, rsclp->len + 1); /* ->len sampled locklessly. */
        if (lazy)
                rsclp->len_lazy++;
        smp_mb(); /* Ensure counts are updated before callback is enqueued. */
        rhp->next = NULL;
        *rsclp->tails[RCU_NEXT_TAIL] = rhp;
        rsclp->tails[RCU_NEXT_TAIL] = &rhp->next;
}
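
/*
 * Illustrative sketch (not part of this header): roughly how a
 * call_rcu()-style path might post a callback, assuming "func" is the
 * caller's callback and interrupts are disabled so the per-CPU list
 * cannot be accessed concurrently from this CPU.  Laziness is shown
 * hard-wired to false for simplicity.
 *
 *      rhp->func = func;
 *      rcu_segcblist_enqueue(rsclp, rhp, false);
 */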

/*
 * Entrain the specified callback onto the specified rcu_segcblist at
 * the end of the last non-empty segment.  If the entire rcu_segcblist
 * is empty, make no change, but return false.
 *
 * This is intended for use by rcu_barrier()-like primitives, -not-
 * for normal grace-period use.  IMPORTANT:  The callback you enqueue
 * will wait for all prior callbacks, NOT necessarily for a grace
 * period.  You have been warned.
 */
static inline bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
                                         struct rcu_head *rhp, bool lazy)
{
        int i;

        if (rcu_segcblist_n_cbs(rsclp) == 0)
                return false;
        WRITE_ONCE(rsclp->len, rsclp->len + 1);
        if (lazy)
                rsclp->len_lazy++;
        smp_mb(); /* Ensure counts are updated before callback is entrained. */
        rhp->next = NULL;
        for (i = RCU_NEXT_TAIL; i > RCU_DONE_TAIL; i--)
                if (rsclp->tails[i] != rsclp->tails[i - 1])
                        break;
        *rsclp->tails[i] = rhp;
        for (; i <= RCU_NEXT_TAIL; i++)
                rsclp->tails[i] = &rhp->next;
        return true;
}
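
/*
 * Illustrative sketch (not part of this header): an rcu_barrier()-style
 * user would entrain its own callback behind whatever is already queued
 * on this CPU, treating an empty list as "nothing to wait for here".
 * Both "rcu_barrier_callback" (a completion-signalling callback) and
 * "complete_immediately" are assumed names, not symbols of this file.
 *
 *      rhp->func = rcu_barrier_callback;
 *      if (!rcu_segcblist_entrain(rsclp, rhp, false))
 *              complete_immediately();   // hypothetical: list was empty
 */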

/*
 * Extract only the counts from the specified rcu_segcblist structure,
 * and place them in the specified rcu_cblist structure.  This function
 * supports both callback orphaning and invocation, hence the separation
 * of counts and callbacks.  (Callbacks ready for invocation must be
 * orphaned and adopted separately from pending callbacks, but counts
 * apply to all callbacks.  Locking must be used to make sure that
 * both orphaned-callbacks lists are consistent.)
 */
static inline void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
                                               struct rcu_cblist *rclp)
{
        rclp->len_lazy += rsclp->len_lazy;
        rclp->len += rsclp->len;
        rsclp->len_lazy = 0;
        WRITE_ONCE(rsclp->len, 0); /* ->len sampled locklessly. */
}

/*
 * Extract only those callbacks ready to be invoked from the specified
 * rcu_segcblist structure and place them in the specified rcu_cblist
 * structure.
 */
static inline void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
                                                  struct rcu_cblist *rclp)
{
        int i;

        if (!rcu_segcblist_ready_cbs(rsclp))
                return; /* Nothing to do. */
        *rclp->tail = rsclp->head;
        rsclp->head = *rsclp->tails[RCU_DONE_TAIL];
        *rsclp->tails[RCU_DONE_TAIL] = NULL;
        rclp->tail = rsclp->tails[RCU_DONE_TAIL];
        for (i = RCU_CBLIST_NSEGS - 1; i >= RCU_DONE_TAIL; i--)
                if (rsclp->tails[i] == rsclp->tails[RCU_DONE_TAIL])
                        rsclp->tails[i] = &rsclp->head;
}

/*
 * Extract only those callbacks still pending (not yet ready to be
 * invoked) from the specified rcu_segcblist structure and place them in
 * the specified rcu_cblist structure.  Note that this loses information
 * about any callbacks that might have been partway done waiting for
 * their grace period.  Too bad!  They will have to start over.
 */
static inline void
rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
                               struct rcu_cblist *rclp)
{
        int i;

        if (!rcu_segcblist_pend_cbs(rsclp))
                return; /* Nothing to do. */
        *rclp->tail = *rsclp->tails[RCU_DONE_TAIL];
        rclp->tail = rsclp->tails[RCU_NEXT_TAIL];
        *rsclp->tails[RCU_DONE_TAIL] = NULL;
        for (i = RCU_DONE_TAIL + 1; i < RCU_CBLIST_NSEGS; i++)
                rsclp->tails[i] = rsclp->tails[RCU_DONE_TAIL];
}

/*
 * Move the entire contents of the specified rcu_segcblist structure,
 * counts, callbacks, and all, to the specified rcu_cblist structure.
 * @@@ Why do we need this???  Moving early-boot CBs to NOCB lists?
 * @@@ Memory barrier needed?  (Not if only used at boot time...)
 */
static inline void rcu_segcblist_extract_all(struct rcu_segcblist *rsclp,
                                             struct rcu_cblist *rclp)
{
        rcu_segcblist_extract_done_cbs(rsclp, rclp);
        rcu_segcblist_extract_pend_cbs(rsclp, rclp);
        rcu_segcblist_extract_count(rsclp, rclp);
}
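
/*
 * Illustrative sketch (not part of this header): an orphaning path, for
 * example when a CPU goes offline, might split the outgoing CPU's
 * callbacks into separate staging lists, one for counts, one for done
 * callbacks, and one for pending callbacks.  "counts", "done", and
 * "pend" are assumed caller-owned rcu_cblist structures, manipulated
 * under the appropriate lock.
 *
 *      rcu_segcblist_extract_count(rsclp, &counts);
 *      rcu_segcblist_extract_done_cbs(rsclp, &done);
 *      rcu_segcblist_extract_pend_cbs(rsclp, &pend);
 */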

/*
 * Insert counts from the specified rcu_cblist structure in the
 * specified rcu_segcblist structure.
 */
static inline void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
                                              struct rcu_cblist *rclp)
{
        rsclp->len_lazy += rclp->len_lazy;
        /* ->len sampled locklessly. */
        WRITE_ONCE(rsclp->len, rsclp->len + rclp->len);
        rclp->len_lazy = 0;
        rclp->len = 0;
}

/*
 * Move callbacks from the specified rcu_cblist to the beginning of the
 * done-callbacks segment of the specified rcu_segcblist.
 */
static inline void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
                                                 struct rcu_cblist *rclp)
{
        int i;

        if (!rclp->head)
                return; /* No callbacks to move. */
        *rclp->tail = rsclp->head;
        rsclp->head = rclp->head;
        for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
                if (&rsclp->head == rsclp->tails[i])
                        rsclp->tails[i] = rclp->tail;
                else
                        break;
        rclp->head = NULL;
        rclp->tail = &rclp->head;
}

/*
 * Move callbacks from the specified rcu_cblist to the end of the
 * new-callbacks segment of the specified rcu_segcblist.
 */
static inline void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
                                                 struct rcu_cblist *rclp)
{
        if (!rclp->head)
                return; /* Nothing to do. */
        *rsclp->tails[RCU_NEXT_TAIL] = rclp->head;
        rsclp->tails[RCU_NEXT_TAIL] = rclp->tail;
        rclp->head = NULL;
        rclp->tail = &rclp->head;
}
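
/*
 * Illustrative sketch (not part of this header): the adopting side of
 * the orphaning example above would merge the staged lists into the
 * surviving CPU's rcu_segcblist.  Inserting the counts first errs
 * toward over-counting, which is the safe direction given the enqueue
 * comment above: rcu_barrier() may post a callback needlessly, but must
 * never miss one.
 *
 *      rcu_segcblist_insert_count(rsclp, &counts);
 *      rcu_segcblist_insert_done_cbs(rsclp, &done);
 *      rcu_segcblist_insert_pend_cbs(rsclp, &pend);
 */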

/*
 * Advance the callbacks in the specified rcu_segcblist structure based
 * on the current value passed in for the grace-period counter.
 */
static inline void rcu_segcblist_advance(struct rcu_segcblist *rsclp,
                                         unsigned long seq)
{
        int i, j;

        WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
        if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
                return;

        /*
         * Find all callbacks whose ->gp_seq numbers indicate that they
         * are ready to invoke, and put them into the RCU_DONE_TAIL segment.
         */
        for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
                if (ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
                        break;
                rsclp->tails[RCU_DONE_TAIL] = rsclp->tails[i];
        }

        /* If no callbacks moved, nothing more need be done. */
        if (i == RCU_WAIT_TAIL)
                return;

        /* Clean up tail pointers that might have been misordered above. */
        for (j = RCU_WAIT_TAIL; j < i; j++)
                rsclp->tails[j] = rsclp->tails[RCU_DONE_TAIL];

        /*
         * Callbacks moved, so clean up the misordered ->tails[] pointers
         * that now point into the middle of the list of ready-to-invoke
         * callbacks.  The overall effect is to copy down the later pointers
         * into the gap that was created by the now-ready segments.
         */
        for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
                if (rsclp->tails[j] == rsclp->tails[RCU_NEXT_TAIL])
                        break; /* No more callbacks. */
                rsclp->tails[j] = rsclp->tails[i];
                rsclp->gp_seq[j] = rsclp->gp_seq[i];
        }
}
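
/*
 * Illustrative sketch (not part of this header): callers typically
 * advance a CPU's callbacks whenever they notice a newly completed
 * grace period, then check whether anything became ready to invoke.
 * "completed_seq" stands in for whatever grace-period counter the
 * caller tracks, and "invoke_cbs" is a hypothetical invocation routine.
 *
 *      rcu_segcblist_advance(rsclp, completed_seq);
 *      if (rcu_segcblist_ready_cbs(rsclp))
 *              invoke_cbs();
 */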

/*
 * "Accelerate" callbacks based on more-accurate grace-period information.
 * The reason for this is that RCU does not synchronize the beginnings and
 * ends of grace periods, and that callbacks are posted locally.  This in
 * turn means that the callbacks must be labelled conservatively early
 * on, as getting exact information would degrade both performance and
 * scalability.  When more accurate grace-period information becomes
 * available, previously posted callbacks can be "accelerated", marking
 * them to complete at the end of the earlier grace period.
 *
 * This function operates on an rcu_segcblist structure, and also the
 * grace-period sequence number seq at which new callbacks would become
 * ready to invoke.  Returns true if there are callbacks that won't be
 * ready to invoke until seq, false otherwise.
 */
static inline bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp,
                                            unsigned long seq)
{
        int i;

        WARN_ON_ONCE(!rcu_segcblist_is_enabled(rsclp));
        if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
                return false;

        /*
         * Find the segment preceding the oldest segment of callbacks
         * whose ->gp_seq[] completion is at or after that passed in via
         * "seq", skipping any empty segments.  This oldest segment, along
         * with any later segments, can be merged in with any newly arrived
         * callbacks in the RCU_NEXT_TAIL segment, and assigned "seq"
         * as their ->gp_seq[] grace-period completion sequence number.
         */
        for (i = RCU_NEXT_READY_TAIL; i > RCU_DONE_TAIL; i--)
                if (rsclp->tails[i] != rsclp->tails[i - 1] &&
                    ULONG_CMP_LT(rsclp->gp_seq[i], seq))
                        break;

        /*
         * If all the segments contain callbacks that correspond to
         * earlier grace-period sequence numbers than "seq", leave.
         * Assuming that the rcu_segcblist structure has enough
         * segments in its arrays, this can only happen if some of
         * the non-done segments contain callbacks that really are
         * ready to invoke.  This situation will get straightened
         * out by the next call to rcu_segcblist_advance().
         *
         * Also advance to the oldest segment of callbacks whose
         * ->gp_seq[] completion is at or after that passed in via "seq",
         * skipping any empty segments.
         */
        if (++i >= RCU_NEXT_TAIL)
                return false;

        /*
         * Merge all later callbacks, including newly arrived callbacks,
         * into the segment located by the for-loop above.  Assign "seq"
         * as the ->gp_seq[] value in order to correctly handle the case
         * where there were no pending callbacks in the rcu_segcblist
         * structure other than in the RCU_NEXT_TAIL segment.
         */
        for (; i < RCU_NEXT_TAIL; i++) {
                rsclp->tails[i] = rsclp->tails[RCU_NEXT_TAIL];
                rsclp->gp_seq[i] = seq;
        }
        return true;
}
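
/*
 * Illustrative sketch (not part of this header): after posting a new
 * callback, a caller that knows the sequence number "gp_seq_needed" at
 * which that callback could be invoked might accelerate the list and
 * use the return value to decide whether a grace period must be
 * requested.  "gp_seq_needed" and "request_grace_period" are assumed
 * names, not symbols of this file.
 *
 *      rcu_segcblist_enqueue(rsclp, rhp, false);
 *      if (rcu_segcblist_accelerate(rsclp, gp_seq_needed))
 *              request_grace_period(gp_seq_needed);
 */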

/*
 * Scan the specified rcu_segcblist structure for callbacks that need
 * a grace period later than the one specified by "seq".  We don't look
 * at the RCU_DONE_TAIL or RCU_NEXT_TAIL segments because they don't
 * have a grace-period sequence number.
 */
static inline bool rcu_segcblist_future_gp_needed(struct rcu_segcblist *rsclp,
                                                  unsigned long seq)
{
        int i;

        for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
                if (rsclp->tails[i - 1] != rsclp->tails[i] &&
                    ULONG_CMP_LT(seq, rsclp->gp_seq[i]))
                        return true;
        return false;
}

/*
 * Interim function to return rcu_segcblist head pointer.  Longer term, the
 * rcu_segcblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head *rcu_segcblist_head(struct rcu_segcblist *rsclp)
{
        return rsclp->head;
}

/*
 * Interim function to return rcu_segcblist tail pointer.  Longer term, the
 * rcu_segcblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp)
{
        WARN_ON_ONCE(rcu_segcblist_empty(rsclp));
        return rsclp->tails[RCU_NEXT_TAIL];
}