/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/rcu_segcblist.h>

/*
 * Account for the fact that a previously dequeued callback turned out
 * to be marked as lazy.
 */
static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
{
	rclp->len_lazy--;
}
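
/*
 * Usage sketch (illustrative only, not part of this header): the
 * invocation loop is assumed to dequeue a callback, invoke it, and
 * only then learn that it was lazy.  The predicate below is a
 * hypothetical name used for illustration:
 *
 *	struct rcu_head *rhp = rcu_cblist_dequeue(rclp);
 *
 *	if (rhp && invoke_was_lazy(rhp))	// hypothetical predicate
 *		rcu_cblist_dequeued_lazy(rclp);	// fix up the lazy count
 */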

void rcu_cblist_init(struct rcu_cblist *rclp);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);

/*
 * Is the specified rcu_segcblist structure empty?
 *
 * But careful!  The fact that the ->head field is NULL does not
 * necessarily imply that there are no callbacks associated with
 * this structure.  When callbacks are being invoked, they are
 * removed as a group.  If callback invocation must be preempted,
 * the remaining callbacks will be added back to the list.  Either
 * way, the counts are updated later.
 *
 * So it is often the case that rcu_segcblist_n_cbs() should be used
 * instead.
 */
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
	return !rsclp->head;
}

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
	return READ_ONCE(rsclp->len);
}
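
/*
 * Illustrative sketch (assumed caller, not from this file): because
 * ->head can transiently be NULL while callbacks are being invoked,
 * code that needs to know whether any callbacks are outstanding
 * should test the count rather than emptiness:
 *
 *	if (rcu_segcblist_n_cbs(rsclp))		// counts in-flight CBs too
 *		schedule_more_invocation();	// hypothetical helper
 */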

/* Return number of lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
{
	return rsclp->len_lazy;
}

/* Return number of non-lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
{
	return rsclp->len - rsclp->len_lazy;
}

/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline or callback-offloaded CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
	return !!rsclp->tails[RCU_NEXT_TAIL];
}
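
/*
 * Usage sketch (assumed caller, for illustration): enqueue paths are
 * expected to check for an enabled list before posting a callback.
 * The rdp pointer and the fallback are assumptions, not this header's
 * API:
 *
 *	if (rcu_segcblist_is_enabled(&rdp->cblist))
 *		rcu_segcblist_enqueue(&rdp->cblist, rhp, lazy);
 *	else
 *		handle_offline_cpu(rhp);	// hypothetical fallback
 */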

/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks?  (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
	return !*rsclp->tails[seg];
}
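
/*
 * Example (minimal sketch): with the segments defined in
 * linux/rcu_segcblist.h, a caller can ask whether every callback in
 * the list has already had its grace period end:
 *
 *	if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
 *		;	// only ready-to-invoke callbacks remain, if any
 */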

/*
 * Interim function to return rcu_segcblist head pointer.  Longer term, the
 * rcu_segcblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head *rcu_segcblist_head(struct rcu_segcblist *rsclp)
{
	return rsclp->head;
}

/*
 * Interim function to return rcu_segcblist tail pointer.  Longer term, the
 * rcu_segcblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp)
{
	WARN_ON_ONCE(rcu_segcblist_empty(rsclp));
	return rsclp->tails[RCU_NEXT_TAIL];
}

void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
				 struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp);
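
/*
 * Lifecycle sketch (assumed caller, for illustration only; names other
 * than the functions declared above are hypothetical):
 *
 *	rcu_segcblist_init(rsclp);			// empty, enabled list
 *	rcu_segcblist_enqueue(rsclp, rhp, false);	// post a non-lazy CB
 *	rcu_segcblist_accelerate(rsclp, gp_seq_future);	// assign GP number
 *	rcu_segcblist_advance(rsclp, gp_seq_done);	// GP ended: promote CBs
 *	if (rcu_segcblist_ready_cbs(rsclp))
 *		rcu_segcblist_extract_done_cbs(rsclp, &rcl); // pull ready CBs
 */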