// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };
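
/*
 * Rough overview of the state machine driven by rcu_sync_enter(),
 * rcu_sync_exit() and rcu_sync_func() below:
 *
 *	GP_IDLE   -> GP_ENTER	first rcu_sync_enter(), grace period starts
 *	GP_IDLE   -> GP_PASSED	rcu_sync_enter_start(), no grace period needed
 *	GP_ENTER  -> GP_PASSED	grace period elapsed, readers on the slow path
 *	GP_PASSED -> GP_EXIT	last rcu_sync_exit(), callback queued
 *	GP_EXIT   -> GP_IDLE	grace period elapsed, no new writer arrived
 *	GP_EXIT   -> GP_PASSED	grace period elapsed, a new writer arrived meanwhile
 *	GP_EXIT   -> GP_REPLAY	a whole enter/exit pair slipped in before the callback ran
 *	GP_REPLAY -> GP_EXIT	callback requeues itself to wait for one more grace period
 */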

#define	rss_lock	gp_wait.lock

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 */
void rcu_sync_init(struct rcu_sync *rsp)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
}

/**
 * rcu_sync_enter_start() - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}
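
/*
 * Illustrative sketch only (not taken from the kernel tree; all names are
 * hypothetical): a subsystem that needs readers to start out on the slow
 * path, for example because updates begin before the first reader can run,
 * might do something like:
 *
 *	static struct rcu_sync my_rss;
 *
 *	void __init my_subsys_init(void)
 *	{
 *		rcu_sync_init(&my_rss);
 *		rcu_sync_enter_start(&my_rss);
 *		// rcu_sync_is_idle(&my_rss) is now false; readers take the
 *		// slow path until a matching rcu_sync_exit() is called.
 *	}
 */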

static void rcu_sync_func(struct rcu_head *rhp);

static void rcu_sync_call(struct rcu_sync *rsp)
{
	call_rcu(&rsp->cb_head, rcu_sync_func);
}

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to call_rcu() by rcu_sync_enter() and
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of enter/exit.
 *
 * If it is called by rcu_sync_enter() it signals that all the readers were
 * switched onto the slow path.
 *
 * If it is called by rcu_sync_exit() it takes action based on events that
 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
 * and rcu_sync_exit() pairs need not wait for a grace period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period). If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit(). Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
		 */
		WRITE_ONCE(rsp->gp_state, GP_PASSED);
		wake_up_locked(&rsp->gp_wait);
	} else if (rsp->gp_state == GP_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback to
		 * catch a later GP.
		 */
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
		rcu_sync_call(rsp);
	} else {
		/*
		 * We're at least a GP after the last rcu_sync_exit(); everybody
		 * will now have observed the write side critical section.
		 * Let 'em rip!
		 */
		WRITE_ONCE(rsp->gp_state, GP_IDLE);
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
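
/*
 * Worked example of the GP_REPLAY case handled above (timeline sketch,
 * derived from the code in this file):
 *
 *	rcu_sync_enter()	GP_IDLE -> GP_ENTER -> GP_PASSED, gp_count = 1
 *	rcu_sync_exit()		GP_PASSED -> GP_EXIT, gp_count = 0, callback queued
 *	rcu_sync_enter()	state stays GP_EXIT, gp_count = 1, no grace-period wait
 *	rcu_sync_exit()		GP_EXIT -> GP_REPLAY, gp_count = 0
 *	rcu_sync_func()		GP_REPLAY -> GP_EXIT, callback requeued
 *	rcu_sync_func()		GP_EXIT -> GP_IDLE, readers back on their fast paths
 */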

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update. After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths. A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	int gp_state;

	spin_lock_irq(&rsp->rss_lock);
	gp_state = rsp->gp_state;
	if (gp_state == GP_IDLE) {
		WRITE_ONCE(rsp->gp_state, GP_ENTER);
		WARN_ON_ONCE(rsp->gp_count);
		/*
		 * Note that we could simply do rcu_sync_call(rsp) here and
		 * avoid the "if (gp_state == GP_IDLE)" block below.
		 *
		 * However, synchronize_rcu() can be faster if rcu_expedited
		 * or rcu_blocking_is_gp() is true.
		 *
		 * Another reason is that we can't wait for an RCU callback
		 * if we are called at early boot time, but this shouldn't
		 * happen.
		 */
	}
	rsp->gp_count++;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state == GP_IDLE) {
		/*
		 * See the comment above: this simply does the "synchronous"
		 * call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
		 */
		synchronize_rcu();
		rcu_sync_func(&rsp->cb_head);
		/* Not really needed, wait_event() would see GP_PASSED. */
		return;
	}

	wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed. After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);

	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->gp_state == GP_PASSED) {
			WRITE_ONCE(rsp->gp_state, GP_EXIT);
			rcu_sync_call(rsp);
		} else if (rsp->gp_state == GP_EXIT) {
			WRITE_ONCE(rsp->gp_state, GP_REPLAY);
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}
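
/*
 * Illustrative usage sketch only, loosely modelled on how percpu-rwsem
 * uses this facility; the names below are hypothetical, not kernel APIs:
 *
 *	static struct rcu_sync my_rss;
 *
 *	void my_read_side(void)
 *	{
 *		rcu_read_lock();
 *		if (rcu_sync_is_idle(&my_rss))
 *			do_fastpath();		// no writer: cheap, RCU-protected path
 *		else
 *			do_slowpath();		// writer active: take the heavy lock
 *		rcu_read_unlock();
 *	}
 *
 *	void my_write_side(void)
 *	{
 *		rcu_sync_enter(&my_rss);	// waits until readers see the slow path
 *		do_update();
 *		rcu_sync_exit(&my_rss);		// readers regain fast path after a GP
 *	}
 */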

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int gp_state;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->gp_state == GP_REPLAY)
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
	gp_state = rsp->gp_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state != GP_IDLE) {
		rcu_barrier();
		WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
	}
}