/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/rcu_segcblist.h>

/* Return number of callbacks in the specified callback list. */
static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
{
	return READ_ONCE(rclp->len);
}

/* Return number of callbacks in segmented callback list by summing seglen. */
long rcu_segcblist_n_segment_cbs(struct rcu_segcblist *rsclp);

void rcu_cblist_init(struct rcu_cblist *rclp);
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
			      struct rcu_cblist *srclp,
			      struct rcu_head *rhp);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);

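/*
 * Illustrative sketch only (not part of the upstream header): draining
 * an rcu_cblist and invoking each callback, in the spirit of what the
 * callback-processing code does with extracted lists.  The function
 * name is hypothetical, and locking and count accounting are omitted.
 */
static inline void rcu_cblist_example_drain(struct rcu_cblist *rclp)
{
	struct rcu_head *rhp;

	/* rcu_cblist_dequeue() returns NULL once the list is empty. */
	while ((rhp = rcu_cblist_dequeue(rclp)) != NULL)
		rhp->func(rhp);	/* Invoke the callback on its rcu_head. */
}
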
/*
 * Is the specified rcu_segcblist structure empty?
 *
 * But careful! The fact that the ->head field is NULL does not
 * necessarily imply that there are no callbacks associated with
 * this structure. When callbacks are being invoked, they are
 * removed as a group. If callback invocation must be preempted,
 * the remaining callbacks will be added back to the list. Either
 * way, the counts are updated later.
 *
 * So it is often the case that rcu_segcblist_n_cbs() should be used
 * instead.
 */
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
	return !READ_ONCE(rsclp->head);
}

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
#ifdef CONFIG_RCU_NOCB_CPU
	return atomic_long_read(&rsclp->len);	/* ->len is atomic_long_t here. */
#else
	return READ_ONCE(rsclp->len);
#endif
}

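/*
 * Illustrative sketch only: per the comment above rcu_segcblist_empty(),
 * ->head can transiently be NULL while callbacks are being invoked, so
 * code that needs accurate accounting should key off the count instead.
 * The helper name below is hypothetical.
 */
static inline bool rcu_segcblist_example_has_cbs(struct rcu_segcblist *rsclp)
{
	/* A nonzero ->len also covers callbacks currently being invoked. */
	return rcu_segcblist_n_cbs(rsclp) != 0;
}
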
/* Set the specified flags in the specified rcu_segcblist structure. */
static inline void rcu_segcblist_set_flags(struct rcu_segcblist *rsclp,
					   int flags)
{
	rsclp->flags |= flags;
}

/* Clear the specified flags in the specified rcu_segcblist structure. */
static inline void rcu_segcblist_clear_flags(struct rcu_segcblist *rsclp,
					     int flags)
{
	rsclp->flags &= ~flags;
}

/* Are any of the specified flags set in the specified rcu_segcblist? */
static inline bool rcu_segcblist_test_flags(struct rcu_segcblist *rsclp,
					    int flags)
{
	return READ_ONCE(rsclp->flags) & flags;
}

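/*
 * Illustrative sketch only: the enabled/offloaded predicates below are
 * thin wrappers around these flag helpers.  A hypothetical helper that
 * marks a list as enabled might look like this (the upstream code does
 * the equivalent when initializing the structure):
 */
static inline void rcu_segcblist_example_mark_enabled(struct rcu_segcblist *rsclp)
{
	rcu_segcblist_set_flags(rsclp, SEGCBLIST_ENABLED);
}
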
/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_test_flags(rsclp, SEGCBLIST_ENABLED);
}

/*
 * Is the specified rcu_segcblist NOCB offloaded (or in the middle of the
 * [de]offloading process)?
 */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
	    rcu_segcblist_test_flags(rsclp, SEGCBLIST_LOCKING))
		return true;

	return false;
}

/*
 * Is the specified rcu_segcblist fully offloaded, so that RCU core
 * processing of its callbacks can be skipped entirely?
 */
static inline bool rcu_segcblist_completely_offloaded(struct rcu_segcblist *rsclp)
{
	if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
	    !rcu_segcblist_test_flags(rsclp, SEGCBLIST_RCU_CORE))
		return true;

	return false;
}

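/*
 * Illustrative sketch only: one way the three predicates above fit
 * together.  A hypothetical caller on the callback-processing path
 * might reason as follows (simplified; upstream callers also take
 * locking and the de/offloading transitions into account):
 */
static inline bool rcu_segcblist_example_core_must_run(struct rcu_segcblist *rsclp)
{
	if (!rcu_segcblist_is_enabled(rsclp))
		return false;	/* List not in use, nothing to process. */
	if (rcu_segcblist_completely_offloaded(rsclp))
		return false;	/* The nocb kthreads handle everything. */
	return true;		/* RCU core must process this list. */
}
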
/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks?  (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
	return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
}

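/*
 * Illustrative note on the representation assumed above: ->tails[seg]
 * points to the ->next pointer of the last callback in segment seg (or
 * to an earlier segment's tail, or to ->head, if seg is empty), so all
 * later segments are empty exactly when the pointer it references is
 * NULL.  A hypothetical check that only ready-to-invoke callbacks
 * remain:
 */
static inline bool rcu_segcblist_example_only_done_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL);
}
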
/*
 * Is the specified segment of the specified rcu_segcblist structure
 * empty of callbacks?
 */
static inline bool rcu_segcblist_segempty(struct rcu_segcblist *rsclp, int seg)
{
	if (seg == RCU_DONE_TAIL)
		return &rsclp->head == rsclp->tails[RCU_DONE_TAIL];
	return rsclp->tails[seg - 1] == rsclp->tails[seg];
}

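/*
 * Illustrative sketch only: a segment is empty when its tail pointer
 * coincides with the previous segment's (or with &->head in the case
 * of RCU_DONE_TAIL).  A hypothetical count of nonempty segments, using
 * RCU_CBLIST_NSEGS from linux/rcu_segcblist.h:
 */
static inline int rcu_segcblist_example_nonempty_segs(struct rcu_segcblist *rsclp)
{
	int i, n = 0;

	for (i = RCU_DONE_TAIL; i < RCU_CBLIST_NSEGS; i++)
		if (!rcu_segcblist_segempty(rsclp, i))
			n++;
	return n;
}
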
void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp);
void rcu_segcblist_add_len(struct rcu_segcblist *rsclp, long v);
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp, bool offload);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp);
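
/*
 * Illustrative sketch only: a simplified callback-invocation pass built
 * from the declarations above, loosely in the spirit of the kernel's
 * callback-processing loop.  Real callers run with the relevant locks
 * held, enforce batch limits, requeue leftovers, and handle offloaded
 * lists; all of that is omitted here, and the function name is
 * hypothetical.
 */
static inline void rcu_segcblist_example_invoke_done(struct rcu_segcblist *rsclp,
						     unsigned long gp_seq)
{
	struct rcu_cblist ready;
	struct rcu_head *rhp;
	long invoked = 0;

	rcu_cblist_init(&ready);
	rcu_segcblist_advance(rsclp, gp_seq);		/* Reclassify against completed GPs. */
	rcu_segcblist_extract_done_cbs(rsclp, &ready);	/* Detach the ready callbacks. */
	while ((rhp = rcu_cblist_dequeue(&ready)) != NULL) {
		rhp->func(rhp);				/* Invoke one callback. */
		invoked++;
	}
	rcu_segcblist_add_len(rsclp, -invoked);		/* Counts are adjusted after the fact. */
}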