// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

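/*
 * Extent buffer locking, in brief (a summary of the code below):
 *
 * Each lock has two modes.  In spinning mode the underlying rwlock
 * (eb->lock) is held and waiters spin; holders are counted in
 * spinning_readers/spinning_writers.  In blocking mode the holder has
 * dropped the rwlock and is tracked only by blocking_readers/
 * blocking_writers, so it may sleep; waiters sleep on read_lock_wq or
 * write_lock_wq instead.  read_locks/write_locks count all holders and
 * back the assertions at the end of this file.
 *
 * Locks are taken spinning and converted to blocking (see
 * btrfs_set_lock_blocking_read/write) before the holder does anything
 * that might sleep.  An illustrative write-side sequence, not a
 * required calling convention:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);  (before anything that sleeps)
 *	... modify the buffer ...
 *	btrfs_tree_unlock(eb);
 */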
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

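/*
 * Convert a held read lock from spinning to blocking: account the lock
 * as blocking and drop the rwlock so the owner may sleep.  The nested
 * case is a no-op because a nested read lock never holds the rwlock,
 * so there is nothing to convert.
 */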
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	read_unlock(&eb->lock);
}

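/*
 * Convert a held write lock from spinning to blocking.  Only the first
 * conversion does the accounting and drops the rwlock; blocking_writers
 * never exceeds 1 because only the single owner can convert its lock.
 */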
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (atomic_read(&eb->blocking_writers) == 0) {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		btrfs_assert_tree_locked(eb);
		atomic_inc(&eb->blocking_writers);
		write_unlock(&eb->lock);
	}
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required. The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us. If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/* atomic_dec_and_test implies a barrier */
		if (atomic_dec_and_test(&eb->blocking_writers))
			cond_wake_up_nomb(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/* atomic_dec_and_test implies a barrier */
		if (atomic_dec_and_test(&eb->blocking_readers))
			cond_wake_up_nomb(&eb->read_lock_wq);
	}
}

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
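	/*
	 * The only same-owner re-entry allowed is the nested case below,
	 * where our write lock has already gone blocking.  If we still
	 * held it spinning, read_lock() would deadlock on eb->lock.
	 */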
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't; this won't
 * wait for blocking writers.
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
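	/*
	 * Recheck under the lock: a spinning writer may have converted to
	 * blocking (and released eb->lock) between the check above and our
	 * read_lock(), in which case we must back off.
	 */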
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't; this won't
 * wait for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

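	/* As above, recheck for a writer that went blocking meanwhile. */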
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't; this won't
 * wait for blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

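	/*
	 * This may still briefly spin on eb->lock if a spinning holder is
	 * present; only blocking holders make us bail out without the lock.
	 */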
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
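	/*
	 * Wait for current blocking holders before taking the rwlock, then
	 * recheck: a spinning holder may have gone blocking (releasing
	 * eb->lock) while we slept, so loop until the lock is taken with
	 * no blockers present.
	 */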
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

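/* Sanity check: a write lock must be held by someone, not necessarily us. */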
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

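/* Sanity check: a read lock must be held by someone, not necessarily us. */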
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}