// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently hold a spinning read or write lock (indicated by the
 * rw flag), this will bump the count of blocking holders and drop the
 * spinlock
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * no lock is required.  The lock owner may change if
	 * we have a read lock, but it won't change to or away
	 * from us.  If we have the write lock, we are the owner
	 * and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		/* atomic_dec_and_test implies a barrier */
		if (atomic_dec_and_test(&eb->blocking_writers))
			cond_wake_up_nomb(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		/* atomic_dec_and_test implies a barrier */
		if (atomic_dec_and_test(&eb->blocking_readers))
			cond_wake_up_nomb(&eb->read_lock_wq);
	}
}
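
/*
 * Illustrative sketch, compiled out: how the set/clear blocking helpers
 * pair up around a section that may sleep, comparable to what the
 * path-level helpers (e.g. btrfs_set_path_blocking()) do for whole
 * paths.  do_sleeping_work() is a hypothetical helper, not a real
 * kernel function.
 */
#if 0
static void example_write_then_block(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);		/* spinning write lock */

	/* convert to a blocking lock before anything that may sleep */
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
	do_sleeping_work(eb);		/* hypothetical; may sleep */

	/* take the spinlock back and drop our blocking count */
	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);

	btrfs_tree_unlock(eb);
}
#endif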

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread.  We
		 * allow an additional read lock to be added because it's
		 * for the same thread.  btrfs_find_all_roots() depends on
		 * this as it may be called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
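
/*
 * Illustrative sketch, compiled out: the nested case handled above.  A
 * thread that already holds a *blocking* write lock on an extent buffer
 * may take an additional read lock on it; the read lock is recorded in
 * lock_nested without touching the rwlock.  Condensed into a single
 * function here for illustration.
 */
#if 0
static void example_nested_read(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);

	/* same thread: recorded as a nested lock, no rwlock is taken */
	btrfs_tree_read_lock(eb);
	/* ... read the buffer ... */
	btrfs_tree_read_unlock(eb);	/* only clears lock_nested */

	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
	btrfs_tree_unlock(eb);
}
#endif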

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}
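
/*
 * Illustrative sketch, compiled out: the trylock variant lets a caller
 * avoid waiting on contended buffers and fall back to the ordinary
 * path.  fast_path() is a hypothetical caller-side helper.
 */
#if 0
static void example_try_read(struct extent_buffer *eb)
{
	if (btrfs_try_tree_read_lock(eb)) {
		/* got the spinning read lock without waiting */
		fast_path(eb);		/* hypothetical */
		btrfs_tree_read_unlock(eb);
	} else {
		/* contended: this may wait for blocking writers */
		btrfs_tree_read_lock(eb);
		/* ... */
		btrfs_tree_read_unlock(eb);
	}
}
#endif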

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * if we're nested, we have the write lock.  No new locking
	 * is needed as long as we are the lock owner.
	 * The write unlock will do a barrier for us, and the lock_nested
	 * field only matters to the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	/* atomic_dec_and_test implies a barrier */
	if (atomic_dec_and_test(&eb->blocking_readers))
		cond_wake_up_nomb(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}
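
/*
 * Illustrative sketch, compiled out: a reader that must sleep converts
 * its spinning read lock to a blocking one first, then releases it with
 * the _blocking variant instead of btrfs_tree_read_unlock().
 * do_sleeping_work() is a hypothetical helper.
 */
#if 0
static void example_blocking_reader(struct extent_buffer *eb)
{
	btrfs_tree_read_lock(eb);	/* spinning read lock */
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	do_sleeping_work(eb);		/* hypothetical; may sleep */

	/* drops the blocking read lock and wakes pending writers */
	btrfs_tree_read_unlock_blocking(eb);
}
#endif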

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
	WARN_ON(eb->lock_owner == current->pid);
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/* Use the lighter barrier after atomic */
		smp_mb__after_atomic();
		cond_wake_up_nomb(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}
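
/*
 * Illustrative sketch, compiled out: btrfs_tree_unlock() copes with
 * either state, so a writer that went blocking can unlock directly
 * without converting back to a spinning lock first.
 */
#if 0
static void example_unlock_blocking_writer(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);

	/* no btrfs_clear_lock_blocking_rw() needed before unlocking */
	btrfs_tree_unlock(eb);
}
#endif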

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}