// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

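/*
 * Extent buffer locking overview (summarized from the code below): every
 * extent_buffer has a rwlock (eb->lock) plus counters that track how many
 * holders are in spinning mode and how many have switched to blocking mode.
 * A spinning holder keeps the rwlock held; switching to blocking mode drops
 * the rwlock and bumps blocking_readers/blocking_writers so the holder may
 * sleep while waiters sit on read_lock_wq/write_lock_wq.
 *
 * A typical write-side caller might look like the sketch below; this is
 * illustrative only and not copied from any particular caller:
 *
 *	btrfs_tree_lock(eb);			spinning write lock
 *	btrfs_set_lock_blocking_write(eb);	about to sleep, e.g. for I/O
 *	...
 *	btrfs_clear_lock_blocking_write(eb);	back to spinning mode
 *	btrfs_tree_unlock(eb);
 */
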
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

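/*
 * Sanity checks for the spinning_writers counter, only active with
 * CONFIG_BTRFS_DEBUG.  Without it they compile away to empty stubs.
 */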
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers) != 1);
	atomic_dec(&eb->spinning_writers);
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_writers));
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
#endif

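/*
 * Convert a held spinning read lock into a blocking one: bump
 * blocking_readers and drop the rwlock so the holder can sleep without
 * stalling other users of the extent buffer.
 */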
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	read_unlock(&eb->lock);
}

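/*
 * Convert a held spinning write lock into a blocking one.  If the lock is
 * already blocking this is a no-op.
 */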
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
	}
}

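/*
 * Convert a blocking read lock back to a spinning one by retaking the
 * rwlock, and wake up anyone waiting for the blocking readers to drain.
 */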
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
	read_lock(&eb->lock);
	atomic_inc(&eb->spinning_readers);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
}

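/*
 * Convert a blocking write lock back to a spinning one by retaking the
 * rwlock, and wake up anyone waiting for the blocking writer to go away.
 */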
void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_writers) != 1);
	write_lock(&eb->lock);
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_writers))
		cond_wake_up_nomb(&eb->write_lock_wq);
}

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

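/*
 * The nested case above supports the following pattern, where one thread
 * already holds the (blocking) write lock and then takes a read lock on
 * the same buffer.  Illustrative sketch only; btrfs_find_all_roots() is
 * the in-tree user the comment refers to:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	btrfs_tree_read_lock(eb);	same thread: only sets eb->lock_nested
 *	...
 *	btrfs_tree_read_unlock(eb);	clears eb->lock_nested, nothing else
 *	btrfs_tree_unlock(eb);
 */
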
/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) ||
	    atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

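/*
 * The assertions below only check the lock counters; they do not verify
 * that the calling thread is the holder.
 */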
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}