/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */
10#include <linux/rcu_segcblist.h>
11
Paul E. McKenneyeda669a2019-07-01 17:36:53 -070012/* Return number of callbacks in the specified callback list. */
13static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
14{
15 return READ_ONCE(rclp->len);
16}
17
Ingo Molnar45753c52017-05-02 10:31:18 +020018/*
Ingo Molnar45753c52017-05-02 10:31:18 +020019 * Account for the fact that a previously dequeued callback turned out
20 * to be marked as lazy.
21 */
22static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
23{
24 rclp->len_lazy--;
25}
26
/* Simple (unsegmented) callback-list operations, defined out of line. */
void rcu_cblist_init(struct rcu_cblist *rclp);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);
Ingo Molnar45753c52017-05-02 10:31:18 +020029
30/*
31 * Is the specified rcu_segcblist structure empty?
32 *
33 * But careful! The fact that the ->head field is NULL does not
34 * necessarily imply that there are no callbacks associated with
35 * this structure. When callbacks are being invoked, they are
36 * removed as a group. If callback invocation must be preempted,
37 * the remaining callbacks will be added back to the list. Either
38 * way, the counts are updated later.
39 *
40 * So it is often the case that rcu_segcblist_n_cbs() should be used
41 * instead.
42 */
43static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
44{
Paul E. McKenneye6060b42019-05-13 15:57:50 -070045 return !READ_ONCE(rsclp->head);
Ingo Molnar45753c52017-05-02 10:31:18 +020046}
47
48/* Return number of callbacks in segmented callback list. */
49static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
50{
Paul E. McKenneyeda669a2019-07-01 17:36:53 -070051#ifdef CONFIG_RCU_NOCB_CPU
52 return atomic_long_read(&rsclp->len);
53#else
Ingo Molnar45753c52017-05-02 10:31:18 +020054 return READ_ONCE(rsclp->len);
Paul E. McKenneyeda669a2019-07-01 17:36:53 -070055#endif
Ingo Molnar45753c52017-05-02 10:31:18 +020056}
57
58/* Return number of lazy callbacks in segmented callback list. */
59static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
60{
61 return rsclp->len_lazy;
62}
63
/*
 * Return number of non-lazy callbacks in segmented callback list.
 * (The original comment said "lazy", but the code computes the total
 * count minus the lazy count, which is the non-lazy count.)
 */
static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_n_cbs(rsclp) - rsclp->len_lazy;
}
69
70/*
71 * Is the specified rcu_segcblist enabled, for example, not corresponding
Paul E. McKenneye83e73f2019-05-14 09:50:49 -070072 * to an offline CPU?
Ingo Molnar45753c52017-05-02 10:31:18 +020073 */
74static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
75{
Paul E. McKenney1bb5f9b2019-04-12 12:34:41 -070076 return rsclp->enabled;
Ingo Molnar45753c52017-05-02 10:31:18 +020077}
78
Paul E. McKenneyce5215c2019-04-12 15:58:34 -070079/* Is the specified rcu_segcblist offloaded? */
80static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
81{
82 return rsclp->offloaded;
83}
84
/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks?  (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
	/*
	 * Both the tail pointer and the ->next pointer it references are
	 * loaded with READ_ONCE(), presumably because each can be updated
	 * concurrently -- NOTE(review): confirm against the writers in
	 * rcu_segcblist.c.
	 */
	return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
}
94
/*
 * Segmented callback-list operations defined out of line
 * (presumably in kernel/rcu/rcu_segcblist.c -- confirm in tree).
 */
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
				 struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp);