/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/rcu_segcblist.h>

/* Return number of callbacks in the specified callback list. */
static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
{
	return READ_ONCE(rclp->len);
}

/* Return number of callbacks in segmented callback list by summing seglen. */
long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp);

void rcu_cblist_init(struct rcu_cblist *rclp);
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
			      struct rcu_cblist *srclp,
			      struct rcu_head *rhp);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);

/*
 * Is the specified rcu_segcblist structure empty?
 *
 * But careful! The fact that the ->head field is NULL does not
 * necessarily imply that there are no callbacks associated with
 * this structure. When callbacks are being invoked, they are
 * removed as a group. If callback invocation must be preempted,
 * the remaining callbacks will be added back to the list. Either
 * way, the counts are updated later.
 *
 * So it is often the case that rcu_segcblist_n_cbs() should be used
 * instead.
 */
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
	return !READ_ONCE(rsclp->head);
}

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
#ifdef CONFIG_RCU_NOCB_CPU
	return atomic_long_read(&rsclp->len);
#else
	return READ_ONCE(rsclp->len);
#endif
}
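
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel
 * API): given the caution above about rcu_segcblist_empty(), a caller
 * asking "is there any work at all?" should test the count rather
 * than the ->head pointer.
 */
static inline bool example_segcblist_has_work(struct rcu_segcblist *rsclp)
{
	/* The count also covers callbacks extracted for invocation. */
	return rcu_segcblist_n_cbs(rsclp) != 0;
}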
55
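/* Set, clear, and test bits in the specified rcu_segcblist's ->flags field. */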
static inline void rcu_segcblist_set_flags(struct rcu_segcblist *rsclp,
					   int flags)
{
	rsclp->flags |= flags;
}

static inline void rcu_segcblist_clear_flags(struct rcu_segcblist *rsclp,
					     int flags)
{
	rsclp->flags &= ~flags;
}

static inline bool rcu_segcblist_test_flags(struct rcu_segcblist *rsclp,
					    int flags)
{
	return READ_ONCE(rsclp->flags) & flags;
}

/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED);
}

/*
 * Is the specified rcu_segcblist NOCB offloaded (or in the middle of the
 * [de]offloading process)?
 */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
	    rcu_segcblist_test_flags(rsclp, SEGCBLIST_LOCKING))
		return true;

	return false;
}

static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
{
	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
	    !rcu_segcblist_test_flags(rsclp, SEGCBLIST_RCU_CORE))
		return true;

	return false;
}
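
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel
 * API): one way a caller might combine the two predicates above.
 * Only a completely offloaded segcblist is handled entirely by the
 * rcuo kthreads; a list that is merely offloaded, including one in
 * the middle of the [de]offloading process, still needs RCU-core
 * processing, and rcu_segcblist_is_offloaded() then indicates that
 * the list is shared and must be accessed under the NOCB lock.
 */
static inline bool example_needs_rcu_core(struct rcu_segcblist *rsclp)
{
	return !rcu_segcblist_completely_offloaded(rsclp);
}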

/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks?  (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
	return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
}

/*
 * Is the specified segment of the specified rcu_segcblist structure
 * empty of callbacks?
 */
static inline bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg)
{
	if (seg == RCU_DONE_TAIL)
		return &rsclp->head == rsclp->tails[RCU_DONE_TAIL];
	return rsclp->tails[seg - 1] == rsclp->tails[seg];
}
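
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel
 * API): find the first non-empty segment, relying on the same
 * ->tails[] representation used by the two predicates above.  The
 * RCU_DONE_TAIL, RCU_WAIT_TAIL, RCU_NEXT_READY_TAIL, and RCU_NEXT_TAIL
 * segments partition one singly linked list, with ->tails[seg]
 * referencing the ->next pointer of the last callback in segment seg.
 */
static inline int example_first_nonempty_seg(struct rcu_segcblist *rsclp)
{
	int seg;

	for (seg = RCU_DONE_TAIL; seg < RCU_CBLIST_NSEGS; seg++)
		if (!rcu_segcblist_segempty(rsclp, seg))
			return seg;
	return -1;	/* All segments empty. */
}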

void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp);
void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v);
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp);