// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

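/*
 * Debug-only sanity counters: with CONFIG_BTRFS_DEBUG these track how many
 * holders of each lock type are currently spinning (as opposed to blocking)
 * and warn on imbalance; without it they compile to empty stubs, so the
 * fast path pays nothing for them.
 */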
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers) != 1);
	atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
#endif

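/*
 * Convert a held spinning read lock into a blocking read lock: bump the
 * blocking reader count and drop the rwlock so other threads can make
 * progress while this reader does something that might sleep.
 */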
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}

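/*
 * Convert a held spinning write lock into a blocking write lock: record the
 * blocking writer and drop the rwlock. Readers and writers that show up
 * while the lock is blocking will wait on the wait queues instead of
 * spinning on eb->lock.
 */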
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
	}
}

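/*
 * Convert a blocking read lock back to a spinning one: retake the rwlock in
 * read mode, then drop the blocking reader count and wake anyone waiting
 * for the blocking readers to go away.
 */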
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
	read_lock(&eb->lock);
	btrfs_assert_spinning_readers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
}

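/*
 * Convert a blocking write lock back to a spinning one: retake the rwlock
 * in write mode, then drop the blocking writer count and wake waiters.
 */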
void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
	write_lock(&eb->lock);
	btrfs_assert_spinning_writers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_writers))
		cond_wake_up_nomb(&eb->write_lock_wq);
}

/*
 * Take a spinning read lock. This will wait for any blocking writers.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
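	/*
	 * If this thread already holds a spinning write lock on @eb, taking
	 * the read lock here would deadlock on the rwlock. The nested case
	 * below is only legal once the write lock has gone blocking.
	 */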
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the
		 * same thread. btrfs_find_all_roots() depends on this as it
		 * may be called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
}

/*
 * Take a spinning read lock.
 * Returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	return 1;
}

/*
 * Try to take a spinning read lock, without spinning on the rwlock either.
 * Returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	return 1;
}

/*
 * Try to take a spinning write lock.
 * Returns 1 if we get the write lock and 0 if we don't.
 * This won't wait for blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	return 1;
}

/*
 * Drop a spinning read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * If we're nested, we have the write lock. No new locking is needed
	 * as long as we are the lock owner. The write unlock will do a
	 * barrier for us, and the lock_nested field only matters to the
	 * lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * Drop a blocking read lock.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * If we're nested, we have the write lock. No new locking is needed
	 * as long as we are the lock owner. The write unlock will do a
	 * barrier for us, and the lock_nested field only matters to the
	 * lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}

/*
 * Take a spinning write lock. This will wait for both blocking readers and
 * blocking writers.
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
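	/*
	 * Wait until no blocking holders remain, then race for the rwlock.
	 * A new blocking holder can slip in between the wait_event()s and
	 * the write_lock(), so recheck under the lock and retry if needed.
	 */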
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) ||
	    atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * Drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

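	/*
	 * If the write lock had gone blocking, the rwlock was already
	 * dropped in btrfs_set_lock_blocking_write(); all that's left is to
	 * clear the blocking writer count and wake up waiters. Otherwise we
	 * still hold eb->lock and must write_unlock() it.
	 */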
	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}

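/*
 * Sanity check that the extent buffer is write-locked; compiled in
 * unconditionally, unlike the CONFIG_BTRFS_DEBUG-only helpers above.
 */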
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}