/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/* insert a new ref head into the head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

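/*
 * Grab the head's mutex while delayed_refs->lock is held.  If the trylock
 * fails, take a reference on the head, drop the spinlock to sleep on the
 * mutex, then retake the spinlock.  Returns -EAGAIN if the head was run and
 * removed from the rbtree in the meantime, in which case the caller must
 * look it up again.
 */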
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

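/*
 * Unlink a ref from its head's list (and from the ref_add_list if it is
 * queued there), drop the list's reference to it and update the entry
 * counters.  The head's spinlock must be held.
 */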
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	assert_spin_locked(&head->lock);
	list_del(&ref->list);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

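/*
 * Try to merge @ref with the refs that follow it on @head's list.  Refs can
 * merge when they describe the same backref (same type and matching
 * root/parent/objectid/offset) and are not pinned by a tree mod log user
 * (seq >= @seq).  ref_mods are combined and any ref whose mod reaches zero
 * is dropped.  Returns true when @ref itself was dropped or swapped away,
 * in which case the caller restarts from the front of the list.
 */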
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}

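/*
 * Merge all mergeable refs on @head's list.  Data heads are skipped since
 * they rarely accumulate enough refs for merging to pay off.  Refs at or
 * above the lowest seq registered on tree_mod_seq_list are left alone so
 * they stay visible to tree mod log users.
 */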
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}

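/*
 * Returns 1 if a delayed ref with the given seq must be held back because a
 * tree mod log user registered a lower or equal seq, 0 if it may be
 * processed now.
 */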
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

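/*
 * Pick the next head to run, starting the search at run_delayed_start and
 * wrapping around to the start of the rbtree at most once.  Heads already
 * claimed by another worker (head->processing) are skipped.  The returned
 * head is marked as processing and run_delayed_start is advanced past it;
 * NULL is returned when no unclaimed head is left.
 */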
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

/*
 * Helper to insert the ref node at the tail of the list, or merge it with
 * the current tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref;
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * We only need the lock here because the existing head could be
	 * processed concurrently; for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data, int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;
	int qrecord_inserted = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if (btrfs_qgroup_trace_extent_nolock(fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: ideally memory would be freed at the same level it was
	 * allocated; freeing the caller's ref here on merge does not follow
	 * that convention and needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, 0, 0, action, 0,
					&qrecord_inserted, old_ref_mod,
					new_ref_mod);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}

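/*
 * Queue a head-only update (BTRFS_UPDATE_DELAYED_HEAD) carrying an extent
 * op, so a key or flag change can be applied without adding or dropping any
 * reference on the extent.
 */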
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data, NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

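/*
 * Destroy the delayed ref slab caches.  Also used to unwind a partially
 * completed btrfs_delayed_ref_init(), which works because
 * kmem_cache_destroy() tolerates caches that were never created.
 */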
void btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

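/*
 * Create the slab caches for delayed ref heads, tree refs, data refs and
 * extent ops.  On any allocation failure, btrfs_delayed_ref_exit() tears
 * down whatever was already created.
 */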
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}