// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

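/*
 * Compare two delayed ref nodes with the same bytenr: first by type, then
 * by the type specific fields, and finally (when check_seq is set) by
 * sequence number, so the per-head rbtree keeps mergeable refs adjacent.
 */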
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}

/*
 * Insert a new head ref into the rbtree of heads.  Returns the existing
 * entry if one is already present for the same bytenr, or NULL when the
 * new node was linked in.
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

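/*
 * Insert a delayed ref node into the per-head ref tree, ordered by
 * comp_refs() including the sequence number.  Returns the colliding entry
 * if an identical ref is already queued, or NULL on success.
 */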
static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

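/*
 * Return the head with the lowest bytenr in the rbtree, or NULL if the
 * tree is empty.
 */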
static struct btrfs_delayed_ref_head *find_first_ref_head(
		struct btrfs_delayed_ref_root *dr)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = rb_first_cached(&dr->href_root);
	if (!n)
		return NULL;

	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

	return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		bool return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				return NULL;
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
		}
		return entry;
	}
	return NULL;
}

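/*
 * Lock the head's mutex without deadlocking against delayed_refs->lock:
 * try-lock first; on contention take a reference, drop the spinlock, sleep
 * on the mutex and then re-take the spinlock.  Returns -EAGAIN if the head
 * was removed from the rbtree while we slept, in which case the caller
 * must retry.
 */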
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

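/*
 * Unlink a single ref from its head: remove it from the ref_tree and the
 * add_list, drop the node's reference and update the entry count.  The
 * head's lock must be held.
 */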
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

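/*
 * Fold the refs that sort directly after @ref into it, stopping at the
 * first ref that is visible to a tree mod log user or that differs in
 * anything but the sequence number.  Opposing actions cancel out through
 * ref_mod, and refs whose ref_mod reaches zero are dropped.  Returns true
 * when @ref itself was consumed, telling the caller to restart its scan.
 */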
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}

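/*
 * Merge the mergeable refs queued on a head.  Data refs are skipped since
 * there are rarely enough of them to be worth merging; refs at or above
 * the lowest active tree mod log sequence number must stay visible to
 * backref walkers and are skipped as well.
 */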
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}

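/*
 * Report whether a delayed ref with sequence number @seq must be held
 * back.  Returns 1 when the oldest registered tree mod log sequence is at
 * or below @seq, 0 otherwise.
 */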
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

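/*
 * Pick the next head to process: the first head not already being
 * processed at or after run_delayed_start, wrapping around to the start of
 * the rbtree once the end is reached.  The returned head is marked as
 * processing and run_delayed_start is advanced past it; NULL means every
 * head is busy or the tree is empty.
 */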
struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

again:
	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
			     true);
	if (!head && delayed_refs->run_delayed_start != 0) {
		delayed_refs->run_delayed_start = 0;
		head = find_first_ref_head(delayed_refs);
	}
	if (!head)
		return NULL;

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (delayed_refs->run_delayed_start == 0)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

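/*
 * Unlink a head from the delayed ref root and update the head and entry
 * counters.  Both delayed_refs->lock and head->lock must be held.
 */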
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node into the per-head rbtree of refs, or to
 * merge it with an existing, identical ref.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove the existing ref if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref.  The existing
 * and update head refs must have the same bytenr.
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * Update the reference mod on the head to reflect this new operation.
	 * We only need the lock for this case because we could be processing
	 * it currently; for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		u64 csum_leaves =
			btrfs_csum_bytes_to_leaves(fs_info,
						   existing->num_bytes);

		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
			delayed_refs->pending_csums -= existing->num_bytes;
			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
		}
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
			delayed_refs->pending_csums += existing->num_bytes;
			trans->delayed_ref_updates += csum_leaves;
		}
	}
	spin_unlock(&existing->lock);
}

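/*
 * Initialize the fields of a freshly allocated delayed ref head, including
 * the qgroup record bytenr/num_bytes when one is supplied.
 */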
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree.  ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(qrecord && head_ref->qgroup_ref_root
			&& head_ref->qgroup_reserved
			&& existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(trans, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (head_ref->is_data && head_ref->ref_mod < 0) {
			delayed_refs->pending_csums += head_ref->num_bytes;
			trans->delayed_ref_updates +=
				btrfs_csum_bytes_to_leaves(trans->fs_info,
							   head_ref->num_bytes);
		}
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated, this
 *		can be either one of the well-known metadata trees or the
 *		subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
	int ret;
	u8 ref_type;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      ref_root, 0, action, false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(fs_info, record);

	return 0;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int ret;
	u8 ref_type;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}

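/*
 * Queue a delayed ref head whose only job is to run the given extent_op
 * (a key or flags update) against an existing extent item.
 */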
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
			      false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.  It
 * must be called with the delayed ref spinlock held, and it returns the
 * head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}