/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tiny version for non-preemptible single-CPU use.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <linux/rcu_node_tree.h>
#include "rcu_segcblist.h"
#include "rcu.h"

int rcu_scheduler_active __read_mostly;

static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->srcu_lock_nesting[0] = 0;
	sp->srcu_lock_nesting[1] = 0;
	init_swait_queue_head(&sp->srcu_wq);
	sp->srcu_cb_head = NULL;
	sp->srcu_cb_tail = &sp->srcu_cb_head;
	sp->srcu_gp_running = false;
	sp->srcu_gp_waiting = false;
	sp->srcu_idx = 0;
	INIT_WORK(&sp->srcu_work, srcu_drive_gp);
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
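
/*
 * Illustration only, not part of this file's logic: a minimal sketch of
 * setting up a dynamically allocated SRCU domain, assuming hypothetical
 * names "my_srcu" and "my_init".  Statically allocated domains would
 * instead use DEFINE_SRCU() or DEFINE_STATIC_SRCU() from linux/srcu.h
 * and need no runtime initialization.
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 */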

/*
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
{
	WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]);
	if (quiesced)
		WARN_ON(work_pending(&sp->srcu_work));
	else
		flush_work(&sp->srcu_work);
	WARN_ON(sp->srcu_gp_running);
	WARN_ON(sp->srcu_gp_waiting);
	WARN_ON(sp->srcu_cb_head);
	WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail);
}
EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
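
/*
 * Illustration only: tearing down the hypothetical domain from the
 * sketch above.  Most callers use the cleanup_srcu_struct() wrapper,
 * which passes quiesced == false; all readers, callbacks, and grace
 * periods must have completed before this point.
 *
 *	static void __exit my_exit(void)
 *	{
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */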

/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	int newval = sp->srcu_lock_nesting[idx] - 1;

	WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
	if (!newval && READ_ONCE(sp->srcu_gp_waiting))
		swake_up_one(&sp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
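
/*
 * Illustration only: the reader-side counterpart.  srcu_read_lock()
 * returns the index of the currently active srcu_lock_nesting[] slot,
 * and that same index must be passed back to srcu_read_unlock() so the
 * decrement above hits the right counter ("my_srcu", "gp", and "p" are
 * hypothetical):
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	... read from p, possibly sleeping ...
 *	srcu_read_unlock(&my_srcu, idx);
 */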

/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPT operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *sp;

	sp = container_of(wp, struct srcu_struct, srcu_work);
	if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(sp->srcu_gp_running, true);
	local_irq_disable();
	lh = sp->srcu_cb_head;
	sp->srcu_cb_head = NULL;
	sp->srcu_cb_tail = &sp->srcu_cb_head;
	local_irq_enable();
	idx = sp->srcu_idx;
	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(sp->srcu_gp_running, false);
	if (READ_ONCE(sp->srcu_cb_head))
		schedule_work(&sp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
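
/*
 * Illustration only: one possible interleaving of the race mentioned
 * above, showing why the recheck of ->srcu_cb_head after clearing
 * ->srcu_gp_running resolves it on this single CPU:
 *
 *	srcu_drive_gp()			interrupt handler
 *	---------------			-----------------
 *	finishes invoking callbacks
 *					call_srcu() enqueues rhp, sees
 *					->srcu_gp_running still true,
 *					so does not schedule_work()
 *	WRITE_ONCE(->srcu_gp_running, false)
 *	sees ->srcu_cb_head != NULL,
 *	so schedule_work() runs another
 *	grace period that handles rhp
 */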

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	unsigned long flags;

	rhp->func = func;
	rhp->next = NULL;
	local_irq_save(flags);
	*sp->srcu_cb_tail = rhp;
	sp->srcu_cb_tail = &rhp->next;
	local_irq_restore(flags);
	if (!READ_ONCE(sp->srcu_gp_running))
		schedule_work(&sp->srcu_work);
}
EXPORT_SYMBOL_GPL(call_srcu);
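
/*
 * Illustration only: the usual caller pattern, with the rcu_head
 * embedded in the protected structure and container_of() recovering
 * the enclosing object in the callback ("struct foo", "free_foo()",
 * "p", and "my_srcu" are hypothetical):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void free_foo(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_srcu(&my_srcu, &p->rh, free_foo);
 */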

/*
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_srcu(sp, &rs.head, wakeme_after_rcu);
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
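
/*
 * Illustration only: the synchronous update-side pattern built on this
 * primitive.  Once synchronize_srcu() returns, no reader that could
 * have seen the old pointer is still inside its critical section, so
 * the old object may be freed ("gp", "new", "my_lock", and "my_srcu"
 * are hypothetical; my_lock serializes updaters):
 *
 *	struct foo *old = rcu_dereference_protected(gp,
 *					lockdep_is_held(&my_lock));
 *
 *	rcu_assign_pointer(gp, new);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 */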

/* Lockdep diagnostics. */
void __init rcu_scheduler_starting(void)
{
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}