/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/rcu_segcblist.h>

/* Return number of callbacks in the specified callback list. */
static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
{
	return READ_ONCE(rclp->len);
}

/*
 * Account for the fact that a previously dequeued callback turned out
 * to be marked as lazy.
 */
static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
{
	rclp->len_lazy--;
}

void rcu_cblist_init(struct rcu_cblist *rclp);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);

/*
 * Is the specified rcu_segcblist structure empty?
 *
 * But careful!  The fact that the ->head field is NULL does not
 * necessarily imply that there are no callbacks associated with
 * this structure.  When callbacks are being invoked, they are
 * removed as a group.  If callback invocation must be preempted,
 * the remaining callbacks will be added back to the list.  Either
 * way, the counts are updated later.
 *
 * So it is often the case that rcu_segcblist_n_cbs() should be used
 * instead.
 */
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
	return !READ_ONCE(rsclp->head);
}
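
/*
 * Illustrative sketch (an editorial example, not code taken from the
 * kernel): a caller that needs to know whether any callbacks remain
 * outstanding should usually test the count rather than the head pointer:
 *
 *	if (!rcu_segcblist_n_cbs(rsclp))
 *		return;		// nothing queued, counts included
 *
 * because rcu_segcblist_empty() can transiently return true while a batch
 * of callbacks is being invoked but not yet accounted for.
 */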

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
#ifdef CONFIG_RCU_NOCB_CPU
	return atomic_long_read(&rsclp->len);
#else
	return READ_ONCE(rsclp->len);
#endif
}

/* Return number of lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
{
	return rsclp->len_lazy;
}

/* Return number of non-lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
{
	return rcu_segcblist_n_cbs(rsclp) - rsclp->len_lazy;
}

/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
	return rsclp->enabled;
}

/* Is the specified rcu_segcblist offloaded? */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
	return rsclp->offloaded;
}

/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks?  (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
	return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
}
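
/*
 * For example (an illustrative use; the segment names are assumed from
 * include/linux/rcu_segcblist.h rather than defined here):
 *
 *	if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
 *		return;		// no callbacks are waiting for a grace period
 *
 * asks whether the WAIT, NEXT_READY, and NEXT segments are all empty,
 * that is, whether every queued callback (if any) is already ready to
 * be invoked.
 */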

void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
				 struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp);
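
/*
 * Rough, non-authoritative sketch of how the helpers above might be
 * combined on a callback-invocation path (locking, rcu_data plumbing,
 * and the real invocation helper are omitted; the sequence is an
 * editorial assumption, not part of this header):
 *
 *	struct rcu_cblist rcl;
 *	struct rcu_head *rhp;
 *
 *	rcu_cblist_init(&rcl);
 *	rcu_segcblist_extract_count(rsclp, &rcl);
 *	rcu_segcblist_extract_done_cbs(rsclp, &rcl);	// grab ready callbacks
 *	while ((rhp = rcu_cblist_dequeue(&rcl)) != NULL)
 *		rhp->func(rhp);				// invoke each callback
 *	rcu_segcblist_insert_done_cbs(rsclp, &rcl);	// requeue any leftovers
 *	rcu_segcblist_insert_count(rsclp, &rcl);	// fold the count back in
 */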