// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * Delayed back reference update tracking.  For subvolume trees we queue up
 * extent allocations and backref maintenance for delayed processing.  This
 * avoids deep call chains where we add extents in the middle of
 * btrfs_search_slot, and it allows us to buffer up frequently modified
 * backrefs in an rb tree instead of hammering updates on the extent
 * allocation tree.
 */
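
/*
 * The bookkeeping is two-level: delayed_refs->href_root is an rbtree of
 * btrfs_delayed_ref_head entries keyed by bytenr, and each head carries its
 * own ref_tree of individual btrfs_delayed_ref_node entries (ordered by
 * comp_refs() below) plus a list of pending additions, ref_add_list.
 */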

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}
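
/*
 * Refs within a head therefore sort by (type, root or parent[, objectid,
 * offset for data], seq).  Passing check_seq == false lets merge_ref()
 * treat two refs that differ only in seq as candidates for merging.
 */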

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}
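
/*
 * Both insert helpers here use the cached rbtree variant: rb_root_cached
 * keeps a pointer to the leftmost node so rb_first_cached() is O(1), and
 * leftmost is only passed as true when the new node never descended to the
 * right, i.e. when it becomes the new leftmost entry.
 */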

static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_head *find_first_ref_head(
		struct btrfs_delayed_ref_root *dr)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = rb_first_cached(&dr->href_root);
	if (!n)
		return NULL;

	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

	return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot. If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		bool return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				return NULL;
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
		}
		return entry;
	}
	return NULL;
}

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}
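
/*
 * On -EAGAIN above, the head was run and erased from the rbtree while we
 * slept on head->mutex, so it must not be used.  A caller would typically
 * retry along these lines (hypothetical sketch, not a function in this
 * file):
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
 *	if (head && btrfs_delayed_ref_lock(delayed_refs, head) == -EAGAIN) {
 *		spin_unlock(&delayed_refs->lock);
 *		goto again;	(re-lookup, the head went away)
 *	}
 */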

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}
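
/*
 * Worked example of the arithmetic above: an ADD with ref_mod == 1 followed
 * by an otherwise identical DROP with ref_mod == 1 gives mod == -1, so
 * ref->ref_mod reaches 0 and both nodes are dropped; two matching ADDs
 * instead collapse into a single node with ref_mod == 2.
 */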

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
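
/*
 * The seq checks in the two functions above serve the same purpose: refs at
 * or past the oldest active sequence number in tree_mod_seq_list are neither
 * merged away nor run early, because backref walkers that registered that
 * sequence number still need to observe them individually.
 */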

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

again:
	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
			     true);
	if (!head && delayed_refs->run_delayed_start != 0) {
		delayed_refs->run_delayed_start = 0;
		head = find_first_ref_head(delayed_refs);
	}
	if (!head)
		return NULL;

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (delayed_refs->run_delayed_start == 0)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}
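
/*
 * run_delayed_start makes head selection a wrapping cursor over the bytenr
 * space: each pick advances it past the chosen extent, the scan restarts
 * from bytenr 0 once it falls off the end, and heads already claimed by
 * another thread (head->processing) are skipped.
 */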

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node into the head's ref tree, or merge it with
 * an existing ref that compares equal.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove the existing ref if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	spin_unlock(&href->lock);
	return ret;
}
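
/*
 * When insert_delayed_ref() returns > 0 the new node was folded into an
 * existing one and the caller still owns its allocation; this is why
 * btrfs_add_delayed_tree_ref() and btrfs_add_delayed_data_ref() below free
 * the ref with kmem_cache_free() on ret > 0.
 */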

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update,
			 int *old_ref_mod_ret)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case because we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	if (old_ref_mod_ret)
		*old_ref_mod_ret = old_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going to go from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		u64 csum_leaves =
			btrfs_csum_bytes_to_leaves(fs_info,
						   existing->num_bytes);

		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
			delayed_refs->pending_csums -= existing->num_bytes;
			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
		}
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
			delayed_refs->pending_csums += existing->num_bytes;
			trans->delayed_ref_updates += csum_leaves;
		}
	}
	spin_unlock(&existing->lock);
}
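
/*
 * pending_csums therefore tracks bytes only for extents whose net effect is
 * a drop (total_ref_mod < 0), since only a real free has to delete csum
 * items; the delayed refs reservation is grown or released in step whenever
 * the sign of total_ref_mod flips.
 */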
556
Nikolay Borisova2e569b2018-04-24 17:18:22 +0300557static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
558 struct btrfs_qgroup_extent_record *qrecord,
559 u64 bytenr, u64 num_bytes, u64 ref_root,
560 u64 reserved, int action, bool is_data,
561 bool is_system)
562{
563 int count_mod = 1;
564 int must_insert_reserved = 0;
565
566 /* If reserved is provided, it must be a data extent. */
567 BUG_ON(!is_data && reserved);
568
569 /*
570 * The head node stores the sum of all the mods, so dropping a ref
571 * should drop the sum in the head node by one.
572 */
573 if (action == BTRFS_UPDATE_DELAYED_HEAD)
574 count_mod = 0;
575 else if (action == BTRFS_DROP_DELAYED_REF)
576 count_mod = -1;
577
578 /*
579 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
580 * accounting when the extent is finally added, or if a later
581 * modification deletes the delayed ref without ever inserting the
582 * extent into the extent allocation tree. ref->must_insert_reserved
583 * is the flag used to record that accounting mods are required.
584 *
585 * Once we record must_insert_reserved, switch the action to
586 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
587 */
588 if (action == BTRFS_ADD_DELAYED_EXTENT)
589 must_insert_reserved = 1;
590 else
591 must_insert_reserved = 0;
592
593 refcount_set(&head_ref->refs, 1);
594 head_ref->bytenr = bytenr;
595 head_ref->num_bytes = num_bytes;
596 head_ref->ref_mod = count_mod;
597 head_ref->must_insert_reserved = must_insert_reserved;
598 head_ref->is_data = is_data;
599 head_ref->is_system = is_system;
Liu Boe3d03962018-08-23 03:51:50 +0800600 head_ref->ref_tree = RB_ROOT_CACHED;
Nikolay Borisova2e569b2018-04-24 17:18:22 +0300601 INIT_LIST_HEAD(&head_ref->ref_add_list);
602 RB_CLEAR_NODE(&head_ref->href_node);
603 head_ref->processing = 0;
604 head_ref->total_ref_mod = count_mod;
Nikolay Borisova2e569b2018-04-24 17:18:22 +0300605 spin_lock_init(&head_ref->lock);
606 mutex_init(&head_ref->mutex);
607
608 if (qrecord) {
609 if (ref_root && reserved) {
Qu Wenruo1418bae2019-01-23 15:15:12 +0800610 qrecord->data_rsv = reserved;
611 qrecord->data_rsv_refroot = ref_root;
Nikolay Borisova2e569b2018-04-24 17:18:22 +0300612 }
Nikolay Borisova2e569b2018-04-24 17:18:22 +0300613 qrecord->bytenr = bytenr;
614 qrecord->num_bytes = num_bytes;
615 qrecord->old_roots = NULL;
616 }
617}
618
Chris Mason56bec292009-03-13 10:10:06 -0400619/*
Yan Zheng5d4f98a2009-06-10 10:45:14 -0400620 * helper function to actually insert a head node into the rbtree.
Chris Mason56bec292009-03-13 10:10:06 -0400621 * this does all the dirty work in terms of maintaining the correct
Yan Zheng5d4f98a2009-06-10 10:45:14 -0400622 * overall modification count.
Chris Mason56bec292009-03-13 10:10:06 -0400623 */
Josef Bacikd7df2c72014-01-23 09:21:38 -0500624static noinline struct btrfs_delayed_ref_head *
Nikolay Borisov1acda0c2018-04-19 11:06:37 +0300625add_delayed_ref_head(struct btrfs_trans_handle *trans,
Josef Bacikd2788502017-09-29 15:43:57 -0400626 struct btrfs_delayed_ref_head *head_ref,
Qu Wenruo3368d002015-04-16 14:34:17 +0800627 struct btrfs_qgroup_extent_record *qrecord,
Nikolay Borisov2335efa2018-04-24 17:18:24 +0300628 int action, int *qrecord_inserted_ret,
Omar Sandoval7be07912017-06-06 16:45:30 -0700629 int *old_ref_mod, int *new_ref_mod)
Chris Mason56bec292009-03-13 10:10:06 -0400630{
Josef Bacikd7df2c72014-01-23 09:21:38 -0500631 struct btrfs_delayed_ref_head *existing;
Chris Mason56bec292009-03-13 10:10:06 -0400632 struct btrfs_delayed_ref_root *delayed_refs;
Qu Wenruofb235dc2017-02-15 10:43:03 +0800633 int qrecord_inserted = 0;
Chris Mason56bec292009-03-13 10:10:06 -0400634
Chris Mason56bec292009-03-13 10:10:06 -0400635 delayed_refs = &trans->transaction->delayed_refs;
Nikolay Borisov2335efa2018-04-24 17:18:24 +0300636
Qu Wenruo3368d002015-04-16 14:34:17 +0800637 /* Record qgroup extent info if provided */
638 if (qrecord) {
Nikolay Borisoveb86ec72018-04-24 17:18:23 +0300639 if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
Qu Wenruocb93b522016-08-15 10:36:50 +0800640 delayed_refs, qrecord))
Qu Wenruo3368d002015-04-16 14:34:17 +0800641 kfree(qrecord);
Qu Wenruofb235dc2017-02-15 10:43:03 +0800642 else
643 qrecord_inserted = 1;
Qu Wenruo3368d002015-04-16 14:34:17 +0800644 }
645
Nikolay Borisov1acda0c2018-04-19 11:06:37 +0300646 trace_add_delayed_ref_head(trans->fs_info, head_ref, action);
liubo1abe9b82011-03-24 11:18:59 +0000647
Josef Bacikd7df2c72014-01-23 09:21:38 -0500648 existing = htree_insert(&delayed_refs->href_root,
649 &head_ref->href_node);
Chris Mason56bec292009-03-13 10:10:06 -0400650 if (existing) {
Josef Bacikba2c4d42018-12-03 10:20:33 -0500651 update_existing_head_ref(trans, existing, head_ref,
Omar Sandoval7be07912017-06-06 16:45:30 -0700652 old_ref_mod);
Chris Mason56bec292009-03-13 10:10:06 -0400653 /*
654 * we've updated the existing ref, free the newly
655 * allocated ref
656 */
Miao Xie78a61842012-11-21 02:21:28 +0000657 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
Josef Bacikd7df2c72014-01-23 09:21:38 -0500658 head_ref = existing;
Chris Mason56bec292009-03-13 10:10:06 -0400659 } else {
Omar Sandoval7be07912017-06-06 16:45:30 -0700660 if (old_ref_mod)
661 *old_ref_mod = 0;
Josef Bacikba2c4d42018-12-03 10:20:33 -0500662 if (head_ref->is_data && head_ref->ref_mod < 0) {
Nikolay Borisov2335efa2018-04-24 17:18:24 +0300663 delayed_refs->pending_csums += head_ref->num_bytes;
Josef Bacikba2c4d42018-12-03 10:20:33 -0500664 trans->delayed_ref_updates +=
665 btrfs_csum_bytes_to_leaves(trans->fs_info,
666 head_ref->num_bytes);
667 }
Yan Zheng5d4f98a2009-06-10 10:45:14 -0400668 delayed_refs->num_heads++;
669 delayed_refs->num_heads_ready++;
Josef Bacikd7df2c72014-01-23 09:21:38 -0500670 atomic_inc(&delayed_refs->num_entries);
Chris Mason56bec292009-03-13 10:10:06 -0400671 trans->delayed_ref_updates++;
672 }
Qu Wenruofb235dc2017-02-15 10:43:03 +0800673 if (qrecord_inserted_ret)
674 *qrecord_inserted_ret = qrecord_inserted;
Omar Sandoval7be07912017-06-06 16:45:30 -0700675 if (new_ref_mod)
676 *new_ref_mod = head_ref->total_ref_mod;
Nikolay Borisov2335efa2018-04-24 17:18:24 +0300677
Josef Bacikd7df2c72014-01-23 09:21:38 -0500678 return head_ref;
Chris Mason56bec292009-03-13 10:10:06 -0400679}
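
/*
 * Note the qrecord ownership rule above: if btrfs_qgroup_trace_extent_nolock()
 * rejects the record (e.g. one already exists for this bytenr), it is freed
 * here; otherwise ownership moves to delayed_refs and the caller must only
 * touch it through btrfs_qgroup_trace_extent_post().
 */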

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated;
 *		this can be either one of the well-known metadata trees or
 *		the subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op,
			       int *old_ref_mod, int *new_ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system;
	int action = generic_ref->action;
	int level = generic_ref->tree_ref.level;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u8 ref_type;

	is_system = (generic_ref->real_root == BTRFS_CHUNK_TREE_OBJECTID);

	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(generic_ref->real_root) &&
	    is_fstree(generic_ref->tree_ref.root) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				generic_ref->tree_ref.root, action, ref_type);
	ref->root = generic_ref->tree_ref.root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      generic_ref->tree_ref.root, 0, action, false,
			      is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(fs_info, record);

	return 0;
}
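
/*
 * Hypothetical caller sketch (assuming the btrfs_init_generic_ref() and
 * btrfs_init_tree_ref() helpers from delayed-ref.h that belong to the same
 * btrfs_ref conversion series):
 *
 *	struct btrfs_ref ref = { 0 };
 *
 *	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
 *			       num_bytes, parent);
 *	btrfs_init_tree_ref(&ref, level, root_objectid);
 *	ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL, NULL, NULL);
 */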

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved, int *old_ref_mod,
			       int *new_ref_mod)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int action = generic_ref->action;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root = generic_ref->data_ref.ref_root;
	u64 owner = generic_ref->data_ref.ino;
	u64 offset = generic_ref->data_ref.offset;
	u8 ref_type;

	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root) &&
	    is_fstree(generic_ref->real_root) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted,
					old_ref_mod, new_ref_mod);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
			      false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL, NULL, NULL);

	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);
	return 0;
}
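
/*
 * Extent op heads use BTRFS_UPDATE_DELAYED_HEAD, which init_delayed_ref_head()
 * maps to count_mod == 0: they schedule a key or flags update on the extent
 * item without changing its reference count.
 */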

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}