// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

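/*
 * Extent buffer locking
 *
 * Each extent buffer (eb) carries a rwlock (eb->lock) plus counters that
 * track how the lock is currently held.  A lock is either "spinning" (the
 * rwlock itself is held) or "blocking" (the rwlock has been dropped and the
 * blocking_readers/blocking_writers counters keep the buffer locked), so a
 * holder can block, e.g. for IO, without holding the rwlock.  Tasks waiting
 * for blocking holders sleep on read_lock_wq/write_lock_wq.  A task that
 * holds the write lock may additionally take a nested read lock, tracked
 * via lock_nested and lock_owner.
 */
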
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

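/*
 * Debug-only sanity accounting for the spinning reader/writer counts.
 * Without CONFIG_BTRFS_DEBUG these helpers are empty stubs.
 */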
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers) != 1);
	atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
#endif

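/*
 * Convert a held spinning read lock into a blocking one: bump
 * blocking_readers and drop the rwlock so other tasks can make progress
 * while this reader may block.
 */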
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	read_unlock(&eb->lock);
}

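/*
 * Convert a held spinning write lock into a blocking one and drop the
 * rwlock.  A no-op if the lock has already been made blocking.
 */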
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
	}
}

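/*
 * Convert a blocking read lock back to a spinning one: re-take the rwlock
 * for reading and wake up anyone waiting for the blocking readers to go
 * away once the last one is gone.
 */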
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
	read_lock(&eb->lock);
	atomic_inc(&eb->spinning_readers);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
}

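/*
 * Convert a blocking write lock back to a spinning one: re-take the rwlock
 * for writing and wake up waiters once the blocking writer count drops to
 * zero.
 */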
void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
	write_lock(&eb->lock);
	btrfs_assert_spinning_writers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_writers))
		cond_wake_up_nomb(&eb->write_lock_wq);
}

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) ||
	    atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}

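/*
 * Locking assertions: BUG if the extent buffer is not write/read locked
 * as expected.
 */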
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}