// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

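/*
 * Debug-only sanity counters: under CONFIG_BTRFS_DEBUG the helpers below
 * track how many spinning readers/writers currently hold eb->lock and
 * WARN on any imbalance.  Without the config option they compile away to
 * empty stubs, so the fast paths carry no extra cost.
 */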
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers) != 1);
	atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
#endif

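/*
 * Convert a held spinning read lock into a blocking one: account for one
 * more blocking reader and drop eb->lock itself so the holder can sleep
 * while writers are still kept out.
 */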
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}

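/*
 * Convert a held spinning write lock into a blocking one.  The elevated
 * blocking_writers count keeps new lockers out while eb->lock itself is
 * dropped so the holder may sleep.
 */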
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
	}
}

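/*
 * Convert a blocking read lock back to a spinning one: retake eb->lock
 * and, once the last blocking reader is gone, wake up anyone waiting for
 * the readers to drain.
 */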
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
	read_lock(&eb->lock);
	btrfs_assert_spinning_readers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
}

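/*
 * Convert a blocking write lock back to a spinning one: retake eb->lock
 * and wake waiters once blocking_writers drops back to zero.
 */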
void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
	write_lock(&eb->lock);
	btrfs_assert_spinning_writers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_writers))
		cond_wake_up_nomb(&eb->write_lock_wq);
}

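/*
 * Typical conversion pattern, shown only as an illustrative sketch here
 * (real callers live elsewhere in btrfs, e.g. the ctree walking code):
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	...do work that may sleep while the buffer stays locked...
 *	btrfs_clear_lock_blocking_write(eb);
 *	btrfs_tree_unlock(eb);
 *
 * The read-side helpers pair up the same way.
 */
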
/*
 * Take a spinning read lock.  This will wait for any blocking
 * writers.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread.  We allow
		 * an additional read lock to be added because it's for the
		 * same thread.  btrfs_find_all_roots() depends on this as it
		 * may be called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	btrfs_assert_spinning_readers_get(eb);
}

/*
 * Take a spinning read lock.
 * Returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	btrfs_assert_spinning_readers_get(eb);
	return 1;
}

/*
 * Returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	btrfs_assert_spinning_readers_get(eb);
	return 1;
}

/*
 * Returns 1 if we get the write lock and 0 if we don't.
 * This won't wait for blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	return 1;
}

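/*
 * Illustrative sketch of the trylock pattern (not part of this file):
 * grab the write lock opportunistically and fall back to the sleeping
 * path when the buffer is contended:
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);
 */
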
/*
 * Drop a spinning read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * If we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * Drop a blocking read lock.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * If we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * Take a spinning write lock.  This will wait for both
 * blocking readers and writers.
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) ||
	    atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * Drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}

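/*
 * The assertions below back the conversions above: every path that
 * manipulates the blocking counters first verifies, via the read_locks
 * and write_locks counts, that the caller really holds the lock it
 * claims to hold.
 */
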
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}