/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 * Internal non-public definitions.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Ingo Molnar <mingo@elte.hu>
 *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/swait.h>
#include <linux/stop_machine.h>

/*
 * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
 * CONFIG_RCU_FANOUT_LEAF.
 * In theory, it should be possible to add more levels straightforwardly.
 * In practice, going from three levels to four worked well.
 * Of course, your mileage may vary.
 */

#ifdef CONFIG_RCU_FANOUT
#define RCU_FANOUT CONFIG_RCU_FANOUT
#else /* #ifdef CONFIG_RCU_FANOUT */
# ifdef CONFIG_64BIT
# define RCU_FANOUT 64
# else
# define RCU_FANOUT 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT */

#ifdef CONFIG_RCU_FANOUT_LEAF
#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
# ifdef CONFIG_64BIT
# define RCU_FANOUT_LEAF 64
# else
# define RCU_FANOUT_LEAF 32
# endif
#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */

#define RCU_FANOUT_1	      (RCU_FANOUT_LEAF)
#define RCU_FANOUT_2	      (RCU_FANOUT_1 * RCU_FANOUT)
#define RCU_FANOUT_3	      (RCU_FANOUT_2 * RCU_FANOUT)
#define RCU_FANOUT_4	      (RCU_FANOUT_3 * RCU_FANOUT)

#if NR_CPUS <= RCU_FANOUT_1
# define RCU_NUM_LVLS	      1
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_NODES	      NUM_RCU_LVL_0
# define NUM_RCU_LVL_INIT     { NUM_RCU_LVL_0 }
# define RCU_NODE_NAME_INIT   { "rcu_node_0" }
# define RCU_FQS_NAME_INIT    { "rcu_node_fqs_0" }
#elif NR_CPUS <= RCU_FANOUT_2
# define RCU_NUM_LVLS	      2
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
# define NUM_RCU_LVL_INIT     { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
# define RCU_NODE_NAME_INIT   { "rcu_node_0", "rcu_node_1" }
# define RCU_FQS_NAME_INIT    { "rcu_node_fqs_0", "rcu_node_fqs_1" }
#elif NR_CPUS <= RCU_FANOUT_3
# define RCU_NUM_LVLS	      3
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
# define NUM_RCU_LVL_INIT     { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
# define RCU_NODE_NAME_INIT   { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
# define RCU_FQS_NAME_INIT    { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
#elif NR_CPUS <= RCU_FANOUT_4
# define RCU_NUM_LVLS	      4
# define NUM_RCU_LVL_0	      1
# define NUM_RCU_LVL_1	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
# define NUM_RCU_LVL_2	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_3	      DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_NODES	      (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
# define NUM_RCU_LVL_INIT     { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
# define RCU_NODE_NAME_INIT   { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
# define RCU_FQS_NAME_INIT    { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
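
/*
 * Worked example (illustrative, assuming the 64-bit defaults above):
 * with RCU_FANOUT = 64 and RCU_FANOUT_LEAF = 64, RCU_FANOUT_1 = 64 and
 * RCU_FANOUT_2 = 4096.  A kernel built with NR_CPUS = 4096 therefore
 * selects RCU_NUM_LVLS = 2: one root rcu_node fanning out to
 * DIV_ROUND_UP(4096, 64) = 64 leaf rcu_node structures, each covering
 * up to 64 CPUs.
 */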

extern int rcu_num_lvls;
extern int rcu_num_nodes;

/*
 * Dynticks per-CPU state.
 */
struct rcu_dynticks {
	long long dynticks_nesting;	/* Track irq/process nesting level. */
					/*  Process level is worth LLONG_MAX/2. */
	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
	atomic_t dynticks;		/* Even value for idle, else odd. */
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	long long dynticks_idle_nesting;
					/* irq/process nesting level from idle. */
	atomic_t dynticks_idle;		/* Even value for idle, else odd. */
					/*  "Idle" excludes userspace execution. */
	unsigned long dynticks_idle_jiffies;
					/* End of last non-NMI non-idle period. */
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
	bool all_lazy;			/* Are all CPU's CBs lazy? */
	unsigned long nonlazy_posted;
					/* # times non-lazy CBs posted to CPU. */
	unsigned long nonlazy_posted_snap;
					/* idle-period nonlazy_posted snapshot. */
	unsigned long last_accelerate;
					/* Last jiffy CBs were accelerated. */
	unsigned long last_advance_all;
					/* Last jiffy CBs were all advanced. */
	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
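
/*
 * Illustrative sketch (compiled out; not part of the kernel API): the
 * even/odd convention on ->dynticks means that a single atomic read
 * answers "is this CPU currently in dynticks-idle state?".  The helper
 * name below is hypothetical.
 */
#if 0
static inline bool rcu_dynticks_idle_example(struct rcu_dynticks *rdtp)
{
	return !(atomic_read(&rdtp->dynticks) & 0x1);	/* Even => idle. */
}
#endif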

/* RCU's kthread states for tracing. */
#define RCU_KTHREAD_STOPPED  0
#define RCU_KTHREAD_RUNNING  1
#define RCU_KTHREAD_WAITING  2
#define RCU_KTHREAD_OFFCPU   3
#define RCU_KTHREAD_YIELDING 4
#define RCU_KTHREAD_MAX      4

/*
 * Definition for node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
					/*  some rcu_state fields as well as */
					/*  following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's ->completed. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask. */
				/*  Initialized from ->qsmaskinitnext at the */
				/*  beginning of each grace period. */
	unsigned long qsmaskinitnext;
				/* Online CPUs for next grace period. */
	unsigned long expmask;	/* CPUs or groups that need to check in */
				/*  to allow the current expedited GP */
				/*  to complete. */
	unsigned long expmaskinit;
				/* Per-GP initial values for expmask. */
				/*  Initialized from ->expmaskinitnext at the */
				/*  beginning of each expedited GP. */
	unsigned long expmaskinitnext;
				/* Online CPUs for next expedited GP. */
				/*  Any CPU that has ever been online will */
				/*  have its bit set. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int	grplo;		/* lowest-numbered CPU or group here. */
	int	grphi;		/* highest-numbered CPU or group here. */
	u8	grpnum;		/* CPU/group number for next level up. */
	u8	level;		/* root is at level 0. */
	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
				/*  exit RCU read-side critical sections */
				/*  before propagating offline up the */
				/*  rcu_node tree? */
	struct rcu_node *parent;
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure.  If there are no tasks */
				/*  queued on this rcu_node structure that */
				/*  are blocking the current grace period, */
				/*  there can be no such task. */
	struct rt_mutex boost_mtx;
				/* Used only for the priority-boosting */
				/*  side effect, not as a lock. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct swait_queue_head nocb_gp_wq[2];
				/* Place for rcu_nocb_kthread() to wait GP. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
	int need_future_gp[2];
				/* Counts of upcoming no-CB GP requests. */
	raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;

	spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
	unsigned long exp_seq_rq;
	wait_queue_head_t exp_wq[4];
} ____cacheline_internodealigned_in_smp;

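/*
 * Illustrative sketch (compiled out): how a quiescent state reported
 * against a leaf rcu_node propagates toward the root.  Each level
 * clears this node's ->grpmask bit in the parent's ->qsmask; the grace
 * period can end only once the root's ->qsmask reaches zero.  The real
 * code (rcu_report_qs_rnp() in tree.c) also handles locking, wakeups,
 * and preempted readers; the function name below is hypothetical.
 */
#if 0
static void rcu_propagate_qs_example(struct rcu_node *rnp, unsigned long mask)
{
	for (; rnp != NULL; rnp = rnp->parent) {
		rnp->qsmask &= ~mask;		/* Report QS at this level. */
		if (rnp->qsmask != 0)
			return;			/* Others still pending here. */
		mask = rnp->grpmask;		/* Continue one level up. */
	}
	/* Root ->qsmask is now zero: the current grace period may end. */
}
#endif
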
/*
 * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
 * are indexed relative to this interval rather than the global CPU ID space.
 * This generates the bit for a CPU in node-local masks.
 */
#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))

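/*
 * Worked example: for a leaf rcu_node covering CPUs 16..31
 * (->grplo == 16), leaf_node_cpu_bit(rnp, 19) yields 1UL << 3,
 * that is, the fourth bit of that leaf's CPU masks.
 */
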
/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
	     cpu <= rnp->grphi; \
	     cpu = cpumask_next((cpu), cpu_possible_mask))

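/*
 * Illustrative sketch (compiled out): combining the iterators above to
 * visit every possible CPU below every leaf rcu_node of an rcu_state,
 * translating each global CPU number into its node-local mask bit.
 * The function name is hypothetical; struct rcu_state is defined
 * further below in this file.
 */
#if 0
static void rcu_scan_leaf_cpus_example(struct rcu_state *rsp)
{
	struct rcu_node *rnp;
	int cpu;

	rcu_for_each_leaf_node(rsp, rnp)
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				; /* This CPU still owes a quiescent state. */
}
#endif
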
/*
 * Union to allow "aggregate OR" operation on the need for a quiescent
 * state by the normal and expedited grace periods.
 */
union rcu_noqs {
	struct {
		u8 norm;
		u8 exp;
	} b; /* Bits. */
	u16 s; /* Set of bits, aggregate OR here. */
};

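/*
 * Illustrative sketch (compiled out): the union lets a fast path test
 * "does this CPU owe any quiescent state at all?" with one load of ->s
 * rather than separate loads of ->b.norm and ->b.exp.  The helper name
 * is hypothetical.
 */
#if 0
static inline bool rcu_noqs_pending_example(union rcu_noqs *nq)
{
	return nq->s != 0; /* Nonzero iff normal and/or expedited QS needed. */
}
#endif
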
/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL		3
#define RCU_NEXT_SIZE		4

/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long	completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long	gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
					/*  for rcu_all_qs() invocations. */
	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
	bool		core_needs_qs;	/* Core waits for quiescent state. */
	bool		beenonline;	/* CPU online at least once. */
	bool		gpwrap;		/* Possible gpnum/completed wrap. */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
					/*  ticks this CPU has handled */
					/*  during and after the last grace */
					/*  period it is aware of. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries that batch # <= ->completed
	 *	The grace period for these entries has completed, and
	 *	the other grace-period-completed entries may be moved
	 *	here temporarily in rcu_process_callbacks().
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries that batch # <= ->completed - 1: waiting for current GP
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
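	/*
	 * Example of the empty-list invariant described above: when the
	 * list is empty, ->nxtlist == NULL and all four ->nxttail[]
	 * entries point at &->nxtlist, so enqueueing at
	 * *nxttail[RCU_NEXT_TAIL] needs no special cases.
	 */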
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	unsigned long	nxtcompleted[RCU_NEXT_SIZE];
					/* grace periods for sublists. */
	long		qlen_lazy;	/* # of lazy queued callbacks */
	long		qlen;		/* # of queued callbacks, incl lazy */
	long		qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long	n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long	n_nocbs_invoked; /* count of no-CBs RCU cbs invoked. */
	unsigned long	n_cbs_orphaned;	/* RCU cbs orphaned by dying CPU */
	unsigned long	n_cbs_adopted;	/* RCU cbs adopted from dying CPU */
	unsigned long	n_force_qs_snap;
					/* did other CPU force QS recently? */
	long		blimit;		/* Upper limit on a processed batch */

	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long cond_resched_completed;
					/* Grace period that needs help */
					/*  from cond_resched(). */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_core_needs_qs;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_nocb_defer_wakeup;
	unsigned long n_rp_need_nothing;

	/* 6) _rcu_barrier(), OOM callbacks, and expediting. */
	struct rcu_head barrier_head;
#ifdef CONFIG_RCU_FAST_NO_HZ
	struct rcu_head oom_head;
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
	atomic_long_t exp_workdone0;	/* # done by workqueue. */
	atomic_long_t exp_workdone1;	/* # done by others #1. */
	atomic_long_t exp_workdone2;	/* # done by others #2. */
	atomic_long_t exp_workdone3;	/* # done by others #3. */
	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
	struct rcu_head **nocb_tail;
	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
	struct rcu_head **nocb_follower_tail;
	struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
	struct task_struct *nocb_kthread;
	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */

	/* The following fields are used by the leader, hence own cacheline. */
	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
					/* CBs waiting for GP. */
	struct rcu_head **nocb_gp_tail;
	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
	struct rcu_data *nocb_next_follower;
					/* Next follower in wakeup chain. */

	/* The following fields are used by the follower, hence new cacheline. */
	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
					/* Leader CPU takes GP-end wakeups. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */

	/* 8) RCU CPU stall data. */
	unsigned int softirq_snap;	/* Snapshot of softirq activity. */

	int cpu;
	struct rcu_state *rsp;
};
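
/*
 * Illustrative sketch (compiled out): enqueueing a new callback on the
 * segmented ->nxtlist.  New arrivals always go at the RCU_NEXT_TAIL
 * end; later grace-period processing advances them through the WAIT
 * and DONE segments.  The real enqueue path (__call_rcu() in tree.c)
 * additionally handles interrupts, counters, and lazy accounting; the
 * function name below is hypothetical.
 */
#if 0
static void rcu_enqueue_callback_example(struct rcu_data *rdp,
					 struct rcu_head *head)
{
	head->next = NULL;
	*rdp->nxttail[RCU_NEXT_TAIL] = head;	   /* Append at list end. */
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next; /* Advance the tail. */
	rdp->qlen++;
}
#endif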

/* Values for nocb_defer_wakeup field in struct rcu_data. */
#define RCU_NOGP_WAKE_NOT	0
#define RCU_NOGP_WAKE		1
#define RCU_NOGP_WAKE_FORCE	2

#define RCU_JIFFIES_TILL_FORCE_QS (1 + (HZ > 250) + (HZ > 500))
					/* For jiffies_till_first_fqs and */
					/*  jiffies_till_next_fqs. */

#define RCU_JIFFIES_FQS_DIV	256	/* Very large systems need more */
					/*  delay between bouts of */
					/*  quiescent-state forcing. */

#define RCU_STALL_RAT_DELAY	2	/* Allow other CPUs time to take */
					/*  at least one scheduling clock */
					/*  irq before ratting on them. */

#define rcu_wait(cond)							\
do {									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
		schedule();						\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
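
/*
 * Example use (illustrative only): a grace-period kthread can block
 * until another CPU sets a flag it is watching, e.g.:
 *
 *	rcu_wait(READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS);
 *
 * The loop re-checks the condition after every wakeup, so spurious
 * wakeups are harmless.
 */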

/*
 * RCU global state, including node hierarchy.  This hierarchy is
 * represented in "heap" form in a dense array.  The root (first level)
 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
 * and the third level in ->node[m+1] and following (->node[m+1] referenced
 * by ->level[2]).  The number of levels is determined by the number of
 * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
 * consisting of a single rcu_node.
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[RCU_NUM_LVLS + 1];
						/* Hierarchy levels (+1 to */
						/*  shut bogus gcc warning) */
	u8 flavor_mask;				/* bit in flavor mask. */
	struct rcu_data __percpu *rda;		/* pointer to per-CPU rcu_data. */
	call_rcu_func_t call;			/* call_rcu() flavor. */
	int ncpus;				/* # CPUs seen so far. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8	boost ____cacheline_internodealigned_in_smp;
						/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */
	struct task_struct *gp_kthread;		/* Task for grace periods. */
	struct swait_queue_head gp_wq;		/* Where GP task waits. */
	short gp_flags;				/* Commands for GP task. */
	short gp_state;				/* GP kthread sleep state. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t orphan_lock ____cacheline_internodealigned_in_smp;
						/* Protect following fields. */
	struct rcu_head *orphan_nxtlist;	/* Orphaned callbacks that */
						/*  need a grace period. */
	struct rcu_head **orphan_nxttail;	/* Tail of above. */
	struct rcu_head *orphan_donelist;	/* Orphaned callbacks that */
						/*  are ready to invoke. */
	struct rcu_head **orphan_donetail;	/* Tail of above. */
	long qlen_lazy;				/* Number of lazy callbacks. */
	long qlen;				/* Total number of callbacks. */
	/* End of fields guarded by orphan_lock. */

	struct mutex barrier_mutex;		/* Guards barrier fields. */
	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
	struct completion barrier_completion;	/* Wake at barrier end. */
	unsigned long barrier_sequence;		/* ++ at start and end of */
						/*  _rcu_barrier(). */
	/* End of fields guarded by barrier_mutex. */

	struct mutex exp_mutex;			/* Serialize expedited GP. */
	struct mutex exp_wake_mutex;		/* Serialize wakeup. */
	unsigned long expedited_sequence;	/* Take a ticket. */
	atomic_long_t expedited_normal;		/* # fallbacks to normal. */
	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
	int ncpus_snap;				/* # CPUs seen last time. */

	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long jiffies_kick_kthreads;	/* Time at which to kick */
						/*  kthreads, if configured. */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  but in jiffies. */
	unsigned long gp_activity;		/* Time of last GP kthread */
						/*  activity in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long jiffies_resched;		/* Time at which to resched */
						/*  a reluctant CPU. */
	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
						/*  GP start. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	const char *name;			/* Name of structure. */
	char abbr;				/* Abbreviated name. */
	struct list_head flavors;		/* List of RCU flavors. */
};

/* Values for rcu_state structure's gp_flags field. */
#define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
#define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */

/* Values for rcu_state structure's gp_state field. */
#define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
#define RCU_GP_WAIT_GPS  1	/* Wait for grace-period start. */
#define RCU_GP_DONE_GPS  2	/* Wait done for grace-period start. */
#define RCU_GP_WAIT_FQS  3	/* Wait for force-quiescent-state time. */
#define RCU_GP_DOING_FQS 4	/* Wait done for force-quiescent-state time. */
#define RCU_GP_CLEANUP	 5	/* Grace-period cleanup started. */
#define RCU_GP_CLEANED	 6	/* Grace-period cleanup complete. */

#ifndef RCU_TREE_NONCORE
static const char * const gp_state_names[] = {
	"RCU_GP_IDLE",
	"RCU_GP_WAIT_GPS",
	"RCU_GP_DONE_GPS",
	"RCU_GP_WAIT_FQS",
	"RCU_GP_DOING_FQS",
	"RCU_GP_CLEANUP",
	"RCU_GP_CLEANED",
};
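
/*
 * Example (illustrative): gp_state_names[] is indexed by the gp_state
 * values above, so a tracer can render the GP kthread's sleep state as
 * a string, e.g. gp_state_names[RCU_GP_WAIT_FQS] == "RCU_GP_WAIT_FQS".
 */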
#endif /* #ifndef RCU_TREE_NONCORE */

extern struct list_head rcu_struct_flavors;

/* Sequence through rcu_state structures for each RCU flavor. */
#define for_each_rcu_flavor(rsp) \
	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
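
/*
 * Illustrative sketch (compiled out): walking every registered RCU
 * flavor, e.g. rcu_sched, rcu_bh, and (given CONFIG_PREEMPT_RCU)
 * rcu_preempt.  The function name is hypothetical.
 */
#if 0
static void rcu_print_flavors_example(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		pr_info("RCU flavor: %s (%c)\n", rsp->name, rsp->abbr);
}
#endif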

/*
 * RCU implementation internal declarations:
 */
extern struct rcu_state rcu_sched_state;

extern struct rcu_state rcu_bh_state;

#ifdef CONFIG_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */

#ifndef RCU_TREE_NONCORE

/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
static void rcu_preempt_note_context_switch(void);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
static void rcu_preempt_check_callbacks(void);
void call_rcu(struct rcu_head *head, rcu_callback_t func);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
static bool rcu_is_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
				       struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void __init rcu_spawn_boost_kthreads(void);
static void rcu_prepare_kthreads(int cpu);
static void rcu_cleanup_after_idle(void);
static void rcu_prepare_for_idle(void);
static void rcu_idle_count_callbacks_posted(void);
static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
static void print_cpu_stall_info_begin(void);
static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
static void print_cpu_stall_info_end(void);
static void zero_cpu_stall_ticks(struct rcu_data *rdp);
static void increment_cpu_stall_ticks(void);
static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
static void rcu_init_one_nocb(struct rcu_node *rnp);
static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
			    bool lazy, unsigned long flags);
static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp,
				      unsigned long flags);
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_all_nocb_kthreads(int cpu);
static void __init rcu_spawn_nocb_kthreads(void);
#ifdef CONFIG_RCU_NOCB_CPU
static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(int irq);
static void rcu_sysidle_exit(int irq);
static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
				  unsigned long *maxj);
static bool is_sysidle_rcu_state(struct rcu_state *rsp);
static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
				  unsigned long maxj);
static void rcu_bind_gp_kthread(void);
static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
static void rcu_dynticks_task_enter(void);
static void rcu_dynticks_task_exit(void);

#endif /* #ifndef RCU_TREE_NONCORE */

#ifdef CONFIG_RCU_TRACE
/* Read out queue lengths for tracing. */
static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
{
#ifdef CONFIG_RCU_NOCB_CPU
	*ql = atomic_long_read(&rdp->nocb_q_count);
	*qll = atomic_long_read(&rdp->nocb_q_count_lazy);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
	*ql = 0;
	*qll = 0;
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
}
#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifdef CONFIG_PPC
#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
#else /* #ifdef CONFIG_PPC */
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif /* #else #ifdef CONFIG_PPC */
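
/*
 * Example (illustrative): on PPC, a store preceding raw_spin_unlock(&a)
 * and a load following a later raw_spin_lock(&b) on the same CPU may be
 * reordered across the unlock/lock pair; placing
 * smp_mb__after_unlock_lock() after the lock acquisition upgrades that
 * pair to a full memory barrier.
 */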

/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_node structures form a tree, tree traversal locks
 * different lock variables; this in turn means that an UNLOCK of one
 * level followed by a LOCK of another level does not imply a full memory
 * barrier, and, most importantly, transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, use these
 * wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
}

static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
	smp_mb__after_unlock_lock();
}

static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
{
	raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
}

#define raw_spin_lock_irqsave_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);	\
	smp_mb__after_unlock_lock();					\
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)			\
do {									\
	typecheck(unsigned long, flags);				\
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);	\
} while (0)

static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
{
	bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));

	if (locked)
		smp_mb__after_unlock_lock();
	return locked;
}
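
/*
 * Illustrative sketch (compiled out): typical use of the wrappers when
 * updating grace-period state on an rcu_node.  The fields touched here
 * are examples only; the function name is hypothetical.
 */
#if 0
static void rcu_node_update_example(struct rcu_node *rnp)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Full barrier implied. */
	rnp->qsmask = rnp->qsmaskinit;		    /* e.g., start a new GP. */
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
#endif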