// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "misc.h"
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

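/*
 * With CONFIG_BTRFS_DEBUG the helpers below maintain lock accounting on
 * the extent buffer (spinning_writers, spinning_readers, read_locks,
 * write_locks) and warn or BUG when a lock state transition does not
 * match expectations.  Without it they compile away to empty stubs.
 */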
#ifdef CONFIG_BTRFS_DEBUG
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
	eb->spinning_writers++;
}

static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers != 1);
	eb->spinning_writers--;
}

static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb)
{
	WARN_ON(eb->spinning_writers);
}

static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->spinning_readers);
}

static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb)
{
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
}

static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb)
{
	atomic_inc(&eb->read_locks);
}

static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb)
{
	atomic_dec(&eb->read_locks);
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}

static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb)
{
	eb->write_locks++;
}

static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb)
{
	eb->write_locks--;
}

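/*
 * The only non-static assertion helper, so code outside this file can
 * assert that an extent buffer is write locked.  An empty stub when
 * CONFIG_BTRFS_DEBUG is disabled.
 */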
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!eb->write_locks);
}

#else
static void btrfs_assert_spinning_writers_get(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_writers_put(struct extent_buffer *eb) { }
static void btrfs_assert_no_spinning_writers(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_put(struct extent_buffer *eb) { }
static void btrfs_assert_spinning_readers_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_read_locks_put(struct extent_buffer *eb) { }
void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_get(struct extent_buffer *eb) { }
static void btrfs_assert_tree_write_locks_put(struct extent_buffer *eb) { }
#endif

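/*
 * Convert a spinning read lock into a blocking read lock: account the
 * reader as blocking and drop the rwlock so the holder can sleep.  A
 * nested lock (we already hold the write lock) is left untouched.
 */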
void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_read(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	btrfs_assert_tree_read_locked(eb);
	atomic_inc(&eb->blocking_readers);
	btrfs_assert_spinning_readers_put(eb);
	read_unlock(&eb->lock);
}

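/*
 * Convert a spinning write lock into a blocking write lock: mark the
 * writer as blocking and drop the rwlock so the holder can sleep.
 * Calling this when the lock is already blocking is a no-op.
 */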
void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
{
	trace_btrfs_set_lock_blocking_write(eb);
	/*
	 * No lock is required.  The lock owner may change if we have a read
	 * lock, but it won't change to or away from us.  If we have the write
	 * lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (eb->blocking_writers == 0) {
		btrfs_assert_spinning_writers_put(eb);
		btrfs_assert_tree_locked(eb);
		eb->blocking_writers++;
		write_unlock(&eb->lock);
	}
}

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_read_lock_enabled())
		start_ns = ktime_get_ns();
again:
	read_lock(&eb->lock);
	BUG_ON(eb->blocking_writers == 0 &&
	       current->pid == eb->lock_owner);
	if (eb->blocking_writers && current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = true;
		read_unlock(&eb->lock);
		trace_btrfs_tree_read_lock(eb, start_ns);
		return;
	}
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   eb->blocking_writers == 0);
		goto again;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock(eb, start_ns);
}

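/*
 * A sketch of the read-side pairing (illustrative, not a caller in this
 * file): take the spinning lock, convert it to blocking around anything
 * that may sleep, then drop the blocking lock:
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_read(eb);
 *	...something that may sleep...
 *	btrfs_tree_read_unlock_blocking(eb);
 */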
/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers, but may spin briefly on the
 * rwlock itself
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	read_lock(&eb->lock);
	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_tree_read_lock_atomic(eb);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers and won't spin on the rwlock
 * either (it uses read_trylock)
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers)
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (eb->blocking_writers) {
		read_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_read_locks_get(eb);
	btrfs_assert_spinning_readers_get(eb);
	trace_btrfs_try_tree_read_lock(eb);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (eb->blocking_writers || atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	btrfs_assert_tree_write_locks_get(eb);
	btrfs_assert_spinning_writers_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_try_tree_write_lock(eb);
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock(eb);
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	btrfs_assert_spinning_readers_put(eb);
	btrfs_assert_tree_read_locks_put(eb);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	trace_btrfs_tree_read_unlock_blocking(eb);
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = false;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	btrfs_assert_tree_read_locks_put(eb);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	u64 start_ns = 0;

	if (trace_btrfs_tree_lock_enabled())
		start_ns = ktime_get_ns();

	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, eb->blocking_writers == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers) || eb->blocking_writers) {
		write_unlock(&eb->lock);
		goto again;
	}
	btrfs_assert_spinning_writers_get(eb);
	btrfs_assert_tree_write_locks_get(eb);
	eb->lock_owner = current->pid;
	trace_btrfs_tree_lock(eb, start_ns);
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = eb->blocking_writers;

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	trace_btrfs_tree_unlock(eb);
	eb->lock_owner = 0;
	btrfs_assert_tree_write_locks_put(eb);

	if (blockers) {
		btrfs_assert_no_spinning_writers(eb);
		eb->blocking_writers--;
		/*
		 * We need to order modifying blocking_writers above with
		 * actually waking up the sleepers to ensure they see the
		 * updated value of blocking_writers
		 */
		cond_wake_up(&eb->write_lock_wq);
	} else {
		btrfs_assert_spinning_writers_put(eb);
		write_unlock(&eb->lock);
	}
}
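
/*
 * A sketch of the write-side pairing (illustrative, not a caller in
 * this file): btrfs_tree_unlock() handles both the spinning and the
 * blocking form, so the same unlock works whether or not the lock was
 * converted:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_write(eb);
 *	...something that may sleep...
 *	btrfs_tree_unlock(eb);
 */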