blob: 5c293afc07b8e955f1965a93e514664acef440e7 [file] [log] [blame]
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */
9
10#include <linux/rcu_segcblist.h>
11
Paul E. McKenneyeda669a2019-07-01 17:36:53 -070012/* Return number of callbacks in the specified callback list. */
13static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
14{
15 return READ_ONCE(rclp->len);
16}
17
/* Initialize the specified rcu_cblist structure. */
void rcu_cblist_init(struct rcu_cblist *rclp);
/* Enqueue the specified callback onto the specified callback list. */
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
/*
 * Flush the source list into the destination list, then enqueue the
 * specified callback -- exact semantics live in the .c file; verify there.
 */
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
			      struct rcu_cblist *srclp,
			      struct rcu_head *rhp);
/* Dequeue and return the first callback, presumably NULL if list is empty. */
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);
Ingo Molnar45753c52017-05-02 10:31:18 +020024
25/*
26 * Is the specified rcu_segcblist structure empty?
27 *
28 * But careful! The fact that the ->head field is NULL does not
29 * necessarily imply that there are no callbacks associated with
30 * this structure. When callbacks are being invoked, they are
31 * removed as a group. If callback invocation must be preempted,
32 * the remaining callbacks will be added back to the list. Either
33 * way, the counts are updated later.
34 *
35 * So it is often the case that rcu_segcblist_n_cbs() should be used
36 * instead.
37 */
38static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
39{
Paul E. McKenneye6060b42019-05-13 15:57:50 -070040 return !READ_ONCE(rsclp->head);
Ingo Molnar45753c52017-05-02 10:31:18 +020041}
42
43/* Return number of callbacks in segmented callback list. */
44static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
45{
Paul E. McKenneyeda669a2019-07-01 17:36:53 -070046#ifdef CONFIG_RCU_NOCB_CPU
47 return atomic_long_read(&rsclp->len);
48#else
Ingo Molnar45753c52017-05-02 10:31:18 +020049 return READ_ONCE(rsclp->len);
Paul E. McKenneyeda669a2019-07-01 17:36:53 -070050#endif
Ingo Molnar45753c52017-05-02 10:31:18 +020051}
52
Ingo Molnar45753c52017-05-02 10:31:18 +020053/*
54 * Is the specified rcu_segcblist enabled, for example, not corresponding
Paul E. McKenneye83e73f2019-05-14 09:50:49 -070055 * to an offline CPU?
Ingo Molnar45753c52017-05-02 10:31:18 +020056 */
57static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
58{
Paul E. McKenney1bb5f9b2019-04-12 12:34:41 -070059 return rsclp->enabled;
Ingo Molnar45753c52017-05-02 10:31:18 +020060}
61
Paul E. McKenneyce5215c2019-04-12 15:58:34 -070062/* Is the specified rcu_segcblist offloaded? */
63static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
64{
65 return rsclp->offloaded;
66}
67
Ingo Molnar45753c52017-05-02 10:31:18 +020068/*
Ingo Molnar45753c52017-05-02 10:31:18 +020069 * Are all segments following the specified segment of the specified
70 * rcu_segcblist structure empty of callbacks? (The specified
71 * segment might well contain callbacks.)
72 */
73static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
74{
Paul E. McKenney76c69272019-05-13 14:36:11 -070075 return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
Ingo Molnar45753c52017-05-02 10:31:18 +020076}
77
/* Increment the callback count of the specified rcu_segcblist. */
void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp);
/* Lifecycle: initialize, disable, or mark the list as offloaded. */
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
/* Queries for ready-to-invoke and still-pending callbacks. */
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
/* Report the next grace-period number via *lp -- see .c file for semantics. */
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp);
/* Add a single callback to the segmented list (entrain vs. enqueue
 * placement differs -- verify against the .c file). */
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp);
/* Extract counts or callbacks from the segmented list into a plain list. */
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
				 struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
/* Insert counts or callbacks from a plain list back into the segmented list. */
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
/* Advance/accelerate segments relative to grace-period number seq. */
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
/* Merge the source segmented list into the destination segmented list. */
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp);