// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

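/*
 * Lock accounting helpers: with CONFIG_BTRFS_DEBUG enabled they track the
 * number of spinning/blocking readers and writers on an extent buffer and
 * warn when the counts fall out of balance.
 */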
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
	eb->spinning_writers++;
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers != 1);
	eb->spinning_writers--;
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	eb->write_locks++;
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	eb->write_locks--;
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!eb->write_locks);
}

#else
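/* Without CONFIG_BTRFS_DEBUG the lock accounting helpers are no-ops. */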
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif

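/*
 * Convert a held spinning read lock into a blocking read lock: record the
 * blocking reader and release the rwlock so the holder may sleep.
 */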
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_read(eb);
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}

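/*
 * Convert a held spinning write lock into a blocking write lock: record the
 * blocking writer and release the rwlock so the holder may sleep.
 */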
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_write(eb);
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (eb->blocking_writers == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		eb->blocking_writers++;
		write_unlock(&eb->lock);
	}
}

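/*
 * Convert a blocking read lock back into a spinning one: retake the rwlock
 * for reading, drop the blocking reader count and wake anyone waiting for
 * the blocking readers to go away.
 */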
void btrfs_clear_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_clear_lock_blocking_read(eb);
	/*
	 * No lock is required. The lock owner may change if we have a read
	 * lock, but it won't change to or away from us. If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	BUG_ON(atomic_read(&eb->blocking_readers) == 0);
	read_lock(&eb->lock);
	btrfs_assert_spinning_readers_get(eb);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
}

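/*
 * Convert a blocking write lock back into a spinning one: retake the rwlock
 * for writing, drop the blocking writer count and wake any waiters.
 */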
void btrfs_clear_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_clear_lock_blocking_write(eb);
	/*
	 * no lock is required. The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us. If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	write_lock(&eb->lock);
	BUG_ON(eb->blocking_writers != 1);
	btrfs_assert_spinning_writers_get(eb);
	if (--eb->blocking_writers == 0)
		cond_wake_up(&eb->write_lock_wq);
}

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();
again:
	read_lock(&eb->lock);
	BUG_ON(eb->blocking_writers == 0 &&
	       current->pid == eb->lock_owner);
	if (eb->blocking_writers && current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = true;
		read_unlock(&eb->lock);
		trace_btrfs_tree_read_lock(eb, start_ns);
		return;
	}
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   eb->blocking_writers == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	read_lock(&eb->lock);
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock_atomic(eb);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_try_tree_read_lock(eb);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_write_locks_get(eb);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_try_tree_write_lock(eb);
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock_blocking(eb);
	/*
	 * if we're nested, we have the write lock. No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers or writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	btrfs_assert_tree_write_locks_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = eb->blocking_writers;

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	btrfs_assert_tree_write_locks_put(eb);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		eb->blocking_writers--;
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}