/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

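/*
 * Illustrative flow (a sketch, not the complete picture): a tree or
 * data modification queues a ref with btrfs_add_delayed_tree_ref() or
 * btrfs_add_delayed_data_ref(), which files it under a per-bytenr head
 * node in the href_root rbtree.  When the transaction is run, heads
 * are handed out by btrfs_select_ref_head(), serialized with
 * btrfs_delayed_ref_lock(), and their accumulated ref lists are
 * applied to the extent allocation tree.
 */
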
/*
 * compare two delayed tree backrefs with the same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with the same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

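/*
 * compare two delayed ref nodes with the same bytenr: order by type
 * first, then by the type specific fields, and finally by sequence
 * number when check_seq is set.
 */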
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}

/* insert a new ref into the head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no
 * exact match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

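/*
 * lock the head's mutex without holding the delayed ref spinlock for
 * longer than necessary.  If the mutex is contended, take a reference
 * on the head, drop the spinlock while blocking on the mutex, and
 * return -EAGAIN if the head was run and removed from the rbtree in
 * the meantime, so the caller can retry.
 */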
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

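/*
 * unlink a ref from its head's list and drop the reference the list
 * held on it.  The head's spinlock must be held by the caller.
 */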
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	assert_spin_locked(&head->lock);
	list_del(&ref->list);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

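/*
 * try to merge ref with the other refs in the head's list.  Refs with
 * a sequence number at or above seq are still needed for backref
 * walking and are skipped.  Returns true once ref itself has been
 * consumed or swapped away.
 */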
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		if (seq && next->seq >= seq)
			goto next;

		if (comp_refs(ref, next, false))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}

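/*
 * merge all mergeable refs queued on a head.  Data extents rarely
 * accumulate enough refs to be worth merging, so they are skipped;
 * tree refs are merged until no cancelling pairs remain, honouring the
 * lowest sequence number still in use by the tree mod log.
 */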
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}

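/*
 * return 1 if the given seq is at or above the lowest sequence number
 * currently held in the tree mod log, meaning the matching delayed
 * refs must be held back, and 0 if they are safe to process.
 */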
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

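/*
 * pick the next delayed ref head to run: the first unprocessed head at
 * or after run_delayed_start, wrapping around to the start of the
 * rbtree once before giving up.  The chosen head is marked as being
 * processed and run_delayed_start is advanced past it.
 */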
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

/*
 * Helper to insert the ref node at the tail of the head's ref list, or
 * to merge it with the current tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (comp_refs(exist, ref, true))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref;
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * We only need the lock for this case because we could be processing
	 * the head currently; for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		if (existing->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing->lock);
}


/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data, int *qrecord_inserted_ret,
		     int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;
	int qrecord_inserted = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if (btrfs_qgroup_trace_extent_nolock(fs_info,
						     delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, existing, head_ref,
					 old_ref_mod);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (old_ref_mod)
			*old_ref_mod = 0;
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	if (new_ref_mod)
		*new_ref_mod = head_ref->total_ref_mod;
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: ideally the memory would be freed at the same level it was
	 * allocated at, but this pattern is used elsewhere too.  Follow it
	 * for now; this needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref. This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, 0, 0, action, 0,
					&qrecord_inserted, old_ref_mod,
					new_ref_mod);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref. It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}

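/*
 * queue a head-only update that carries an extent_op (deferred flag
 * and/or key updates for the extent item) without adding any tree or
 * data ref.
 */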
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data, NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

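/* tear down the slab caches created by btrfs_delayed_ref_init */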
void btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

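/*
 * create the four slab caches used for delayed ref heads, tree refs,
 * data refs and extent ops; on failure, anything already created is
 * destroyed again.
 */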
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}