/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/rcu_segcblist.h>

/* Return number of callbacks in the specified callback list. */
static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
{
	return READ_ONCE(rclp->len);
}

void rcu_cblist_init(struct rcu_cblist *rclp);
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
			      struct rcu_cblist *srclp,
			      struct rcu_head *rhp);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);
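
/*
 * Usage sketch (an illustration only, not an invocation sequence from
 * this file): rcu_cblist_dequeue() returns the next callback or NULL
 * once the list is empty, so draining a list and invoking its
 * callbacks looks like:
 *
 *	struct rcu_head *rhp;
 *
 *	while ((rhp = rcu_cblist_dequeue(rclp)))
 *		rhp->func(rhp);
 */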

/*
 * Is the specified rcu_segcblist structure empty?
 *
 * But careful! The fact that the ->head field is NULL does not
 * necessarily imply that there are no callbacks associated with
 * this structure. When callbacks are being invoked, they are
 * removed as a group. If callback invocation must be preempted,
 * the remaining callbacks will be added back to the list. Either
 * way, the counts are updated later.
 *
 * So it is often the case that rcu_segcblist_n_cbs() should be used
 * instead.
 */
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
	return !READ_ONCE(rsclp->head);
}
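
/*
 * Per the caveat above, the count is the reliable way to ask whether
 * callbacks remain.  A sketch of the distinction (illustration only):
 *
 *	if (rcu_segcblist_n_cbs(rsclp))
 *		;	// CBs queued, even if ->head is transiently NULL.
 *	if (rcu_segcblist_empty(rsclp))
 *		;	// ->head is NULL, but invocation may be in flight.
 */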

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
#ifdef CONFIG_RCU_NOCB_CPU
	return atomic_long_read(&rsclp->len);
#else
	return READ_ONCE(rsclp->len);
#endif
}
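
/*
 * The #ifdef above mirrors the type of ->len in linux/rcu_segcblist.h:
 * on CONFIG_RCU_NOCB_CPU kernels it is an atomic_long_t, because
 * offloaded lists have their lengths adjusted by the no-CBs kthreads
 * as well as by the CPU itself, so a plain read of a long no longer
 * suffices.
 */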

/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
	return rsclp->enabled;
}

/* Is the specified rcu_segcblist offloaded? */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
	return rsclp->offloaded;
}
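
/*
 * A common pattern (a sketch; the real call sites are in
 * kernel/rcu/tree.c) is to process a list locally only when it is
 * enabled and not offloaded, since offloaded lists are serviced by
 * the no-CBs kthreads instead:
 *
 *	if (rcu_segcblist_is_enabled(rsclp) &&
 *	    !rcu_segcblist_is_offloaded(rsclp))
 *		;	// Process callbacks on this CPU.
 */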

/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks?  (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
	return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
}
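
/*
 * For example (a sketch using the segment names from
 * linux/rcu_segcblist.h), asking whether every queued callback is
 * already ready to invoke, that is, nothing still waits on a current
 * or future grace period:
 *
 *	if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
 *		;	// Any callbacks present are in the done segment.
 */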

void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp);
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp);
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
				 struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp);
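
/*
 * Life-cycle sketch (a simplified outline with illustrative variable
 * names rhp, rcl, gps, and seq, where gps and seq are grace-period
 * sequence numbers; the authoritative sequence, including locking and
 * the no-CBs paths, lives in kernel/rcu/tree.c):
 *
 *	rcu_segcblist_enqueue(rsclp, rhp);	// New CB into RCU_NEXT_TAIL.
 *	rcu_segcblist_accelerate(rsclp, gps);	// Assign grace-period numbers.
 *	rcu_segcblist_advance(rsclp, seq);	// Completed GPs move CBs to done.
 *	rcu_segcblist_extract_done_cbs(rsclp, &rcl);	// Pull ready CBs,
 *							// invoke them, then:
 *	rcu_segcblist_insert_done_cbs(rsclp, &rcl);	// Requeue leftovers.
 *	rcu_segcblist_insert_count(rsclp, &rcl);	// And fix up the count.
 */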