/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"

#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2

/*
 * We are limited in the number of subclasses by MAX_LOCKDEP_SUBCLASSES, which
 * at the time of this writing is 8, which is how many we use.  Keep this in
 * mind if you decide you want to add another subclass.
 */
enum btrfs_lock_nesting {
	BTRFS_NESTING_NORMAL,

	/*
	 * When we COW a block we are holding the lock on the original block,
	 * and since our lockdep maps are rootid+level, this confuses lockdep
	 * when we lock the newly allocated COW'ed block.  Handle this by having
	 * a subclass for COW'ed blocks so that lockdep doesn't complain.
	 */
	BTRFS_NESTING_COW,

	/*
	 * Oftentimes we need to lock adjacent nodes on the same level while
	 * still holding the lock on the original node we searched to, such as
	 * for searching forward or for split/balance.
	 *
	 * Because of this we need to indicate to lockdep that this is
	 * acceptable by having a different subclass for each of these
	 * operations.
	 */
	BTRFS_NESTING_LEFT,
	BTRFS_NESTING_RIGHT,

	/*
	 * When splitting we will be holding a lock on the left/right node when
	 * we need to cow that node, thus we need a new set of subclasses for
	 * these two operations.
	 */
	BTRFS_NESTING_LEFT_COW,
	BTRFS_NESTING_RIGHT_COW,

	/*
	 * When splitting we may push nodes to the left or right, but still use
	 * the subsequent nodes in our path, keeping our locks on those adjacent
	 * blocks.  Thus when we go to allocate the new split block we've
	 * already used up all of our available subclasses, so this subclass
	 * exists to handle that case.
	 */
	BTRFS_NESTING_SPLIT,

	/*
	 * When promoting a new block to a root we need to have a special
	 * subclass so we don't confuse lockdep, as it will appear that we are
	 * locking a higher level node before a lower level one.  Copying also
	 * has this problem as it appears we're locking the same block again
	 * when we make a snapshot of an existing root.
	 */
	BTRFS_NESTING_NEW_ROOT,

71 /*
Josef Bacikfd7ba1c2020-08-20 11:46:02 -040072 * We are limited to MAX_LOCKDEP_SUBLCLASSES number of subclasses, so
73 * add this in here and add a static_assert to keep us from going over
74 * the limit. As of this writing we're limited to 8, and we're
75 * definitely using 8, hence this check to keep us from messing up in
76 * the future.
77 */
	BTRFS_NESTING_MAX,
};

static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
	      "too many lock subclasses defined");

struct btrfs_path;

void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);

void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);

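/*
 * Illustrative sketch (not part of the original header) of how the
 * blocking and try-lock variants above pair up; error handling and the
 * surrounding search logic are omitted.
 *
 *	struct extent_buffer *eb;
 *
 *	eb = btrfs_lock_root_node(root);
 *	... modify the root node ...
 *	btrfs_tree_unlock(eb);
 *
 *	if (btrfs_try_tree_read_lock(eb)) {
 *		... read-only access, the lock was taken without blocking ...
 *		btrfs_tree_read_unlock(eb);
 *	}
 *
 * btrfs_lock_root_node() returns the root's extent buffer with the write
 * lock already held; the try variants return nonzero on success and never
 * block.
 */
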
#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)
{
	lockdep_assert_held_write(&eb->lock);
}
#else
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { }
#endif

void btrfs_unlock_up_safe(struct btrfs_path *path, int level);

static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
	if (rw == BTRFS_WRITE_LOCK)
		btrfs_tree_unlock(eb);
	else if (rw == BTRFS_READ_LOCK)
		btrfs_tree_read_unlock(eb);
	else
		BUG();
}
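
/*
 * Illustrative sketch (not part of the original header): callers that may
 * take either lock type record which one they hold as BTRFS_WRITE_LOCK or
 * BTRFS_READ_LOCK and hand that value back to btrfs_tree_unlock_rw(), so a
 * single unlock path works for both cases.
 *
 *	int rw;
 *
 *	if (need_write) {
 *		btrfs_tree_lock(eb);
 *		rw = BTRFS_WRITE_LOCK;
 *	} else {
 *		btrfs_tree_read_lock(eb);
 *		rw = BTRFS_READ_LOCK;
 *	}
 *	... use the buffer ...
 *	btrfs_tree_unlock_rw(eb, rw);
 *
 * The need_write flag above is a hypothetical stand-in for whatever
 * condition the caller uses to pick the lock type.
 */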

struct btrfs_drew_lock {
	atomic_t readers;
	struct percpu_counter writers;
	wait_queue_head_t pending_writers;
	wait_queue_head_t pending_readers;
};

int btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);
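
/*
 * Illustrative sketch (not part of the original header): a drew lock
 * allows many concurrent readers or many concurrent writers, but never
 * both groups at the same time.  Initialization can fail because the
 * writers percpu counter needs an allocation.
 *
 *	struct btrfs_drew_lock lock;
 *	int ret;
 *
 *	ret = btrfs_drew_lock_init(&lock);
 *	if (ret)
 *		return ret;
 *
 *	btrfs_drew_write_lock(&lock);
 *	... writer-side work, no readers can be active here ...
 *	btrfs_drew_write_unlock(&lock);
 *
 *	btrfs_drew_lock_destroy(&lock);
 */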

#endif /* BTRFS_LOCKING_H */