// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking. For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing. This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	bool ret = false;
	u64 reserved;

	spin_lock(&global_rsv->lock);
	reserved = global_rsv->reserved;
	spin_unlock(&global_rsv->lock);

	/*
	 * Since the global reserve is just kind of magic we don't really want
	 * to rely on it to save our bacon, so if our size is more than the
	 * delayed_refs_rsv and the global rsv then it's time to think about
	 * bailing.
	 */
	spin_lock(&delayed_refs_rsv->lock);
	reserved += delayed_refs_rsv->reserved;
	if (delayed_refs_rsv->size >= reserved)
		ret = true;
	spin_unlock(&delayed_refs_rsv->lock);
	return ret;
}

int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
	u64 num_entries =
		atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 avg_runtime;
	u64 val;

	smp_mb();
	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
	val = num_entries * avg_runtime;
	if (val >= NSEC_PER_SEC)
		return 1;
	if (val >= NSEC_PER_SEC / 2)
		return 2;

	return btrfs_check_space_for_delayed_refs(trans->fs_info);
}

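/*
 * Worked example for btrfs_should_throttle_delayed_refs() above, with
 * illustrative (not measured) numbers: if avg_delayed_ref_runtime is
 * 1,000,000 ns (1 ms), then 1000 queued entries give val = 1 second and
 * the function returns 1 (throttle hard), 500 entries give val = 0.5
 * second and it returns 2 (throttle mildly), and anything below that
 * falls through to the delayed refs rsv space check.
 */
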
/**
 * Release a ref head's reservation
 *
 * @fs_info:  the filesystem
 * @nr:       number of items to drop
 *
 * This drops the delayed ref head's count from the delayed refs rsv and frees
 * any excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
	u64 released = 0;

	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
	if (released)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, released, 0);
}

/*
 * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
 * @trans - the trans that may have generated delayed refs
 *
 * This is to be called any time we may have adjusted
 * trans->delayed_ref_updates; it will calculate the additional size and add
 * it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes;

	if (!trans->delayed_ref_updates)
		return;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info,
						    trans->delayed_ref_updates);
	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	spin_lock(&delayed_rsv->lock);
	delayed_rsv->size += num_bytes;
	delayed_rsv->full = 0;
	spin_unlock(&delayed_rsv->lock);
	trans->delayed_ref_updates = 0;
}

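/*
 * Sizing example for btrfs_update_delayed_refs_rsv() above, assuming the
 * usual worst-case insertion formula of nodesize * BTRFS_MAX_LEVEL * 2
 * bytes per item in btrfs_calc_insert_metadata_size(): with a 16KiB
 * nodesize, one delayed ref update grows the rsv size by 256KiB, or
 * 512KiB when the free space tree is enabled and num_bytes is doubled.
 */
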
/**
 * Transfer bytes to our delayed refs rsv
 *
 * @fs_info:   the filesystem
 * @src:       source block rsv to transfer from
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to the num_bytes amount from the src rsv to the
 * delayed_refs_rsv. Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *src,
				       u64 num_bytes)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	u64 to_free = 0;

	spin_lock(&src->lock);
	src->reserved -= num_bytes;
	src->size -= num_bytes;
	spin_unlock(&src->lock);

	spin_lock(&delayed_refs_rsv->lock);
	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
		u64 delta = delayed_refs_rsv->size -
			delayed_refs_rsv->reserved;
		if (num_bytes > delta) {
			to_free = num_bytes - delta;
			num_bytes = delta;
		}
	} else {
		to_free = num_bytes;
		num_bytes = 0;
	}

	if (num_bytes)
		delayed_refs_rsv->reserved += num_bytes;
	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
		delayed_refs_rsv->full = 1;
	spin_unlock(&delayed_refs_rsv->lock);

	if (num_bytes)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, num_bytes, 1);
	if (to_free)
		btrfs_space_info_free_bytes_may_use(fs_info,
				delayed_refs_rsv->space_info, to_free);
}

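/*
 * Transfer example for btrfs_migrate_to_delayed_refs_rsv() above: if the
 * delayed refs rsv has size 100 and reserved 90, the shortfall (delta) is
 * 10. Migrating num_bytes = 25 moves 10 bytes in (marking the rsv full)
 * and hands the remaining 15 back to the space info via to_free.
 */
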
/**
 * Refill based on our delayed refs usage
 *
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv with up to one item's worth of space
 * and will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		num_bytes = min(num_bytes, limit);
	}
	spin_unlock(&block_rsv->lock);

	if (!num_bytes)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (ret)
		return ret;
	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
				      0, num_bytes, 1);
	return 0;
}

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}

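/*
 * Ordering note for the comparators above: refs hanging off one head sort
 * first by numeric ref type, then by root (keyed refs) or parent (shared
 * refs), and finally by seq when check_seq is set, so two modifications
 * of the same ref made under different tree mod log sequence numbers stay
 * distinct nodes in the rbtree.
 */
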
/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_head *find_first_ref_head(
		struct btrfs_delayed_ref_root *dr)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = rb_first_cached(&dr->href_root);
	if (!n)
		return NULL;

	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

	return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot. If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		bool return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				return NULL;
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
		}
		return entry;
	}
	return NULL;
}

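/*
 * Lookup example for find_ref_head() above: with heads queued at bytenr
 * 4096 and 12288, searching for 8192 returns NULL when return_bigger is
 * false but the head at 12288 when it is true, which is what lets
 * btrfs_select_ref_head() resume a scan from run_delayed_start.
 */
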
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}

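/*
 * Merge example for merge_ref() above: an ADD ref with ref_mod 1 followed
 * by an otherwise identical DROP ref with ref_mod 2 collapses into a
 * single DROP with ref_mod 1 (the smaller node is swapped away and
 * dropped), while equal and opposite mods cancel to zero and free both
 * nodes.
 */
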
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	int ret = 0;
	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

	if (min_seq != 0 && seq >= min_seq) {
		btrfs_debug(fs_info,
			    "holding back delayed_ref %llu, lowest is %llu",
			    seq, min_seq);
		ret = 1;
	}

	return ret;
}

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

again:
	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
			     true);
	if (!head && delayed_refs->run_delayed_start != 0) {
		delayed_refs->run_delayed_start = 0;
		head = find_first_ref_head(delayed_refs);
	}
	if (!head)
		return NULL;

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (delayed_refs->run_delayed_start == 0)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert a ref node into the rbtree of its head ref, or to merge it
 * with an existing node for the same ref.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove the existing node if its ref_mod dropped to zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	spin_unlock(&href->lock);
	return ret;
}

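/*
 * Insert-time merge example for insert_delayed_ref() above: queueing a
 * DROP with ref_mod 2 when an identical ADD with ref_mod 1 is already in
 * the tree flips the existing node to a DROP with ref_mod 1 and removes
 * it from ref_add_list; the caller sees ret > 0 and frees the node it
 * tried to insert.
 */
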
/*
 * Helper function to update the accounting in the head ref. The existing and
 * update head refs must have the same bytenr.
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/*
		 * If the extent was freed and then reallocated before the
		 * delayed ref entries were processed, we can end up with an
		 * existing head ref without the must_insert_reserved flag
		 * set.  Set it again here.
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		u64 csum_leaves =
			btrfs_csum_bytes_to_leaves(fs_info,
						   existing->num_bytes);

		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
			delayed_refs->pending_csums -= existing->num_bytes;
			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
		}
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
			delayed_refs->pending_csums += existing->num_bytes;
			trans->delayed_ref_updates += csum_leaves;
		}
	}

	spin_unlock(&existing->lock);
}

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree. ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			qrecord->data_rsv = reserved;
			qrecord->data_rsv_refroot = ref_root;
		}
		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}

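/*
 * Quick reference for the count_mod logic in init_delayed_ref_head()
 * above: BTRFS_UPDATE_DELAYED_HEAD maps to 0 (no net change to the ref
 * count), BTRFS_DROP_DELAYED_REF to -1, and both BTRFS_ADD_DELAYED_REF
 * and BTRFS_ADD_DELAYED_EXTENT to +1, with the latter also setting
 * must_insert_reserved so reserved-space accounting is settled when the
 * extent item is finally inserted.
 */
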
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(trans, existing, head_ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (head_ref->is_data && head_ref->ref_mod < 0) {
			delayed_refs->pending_csums += head_ref->num_bytes;
			trans->delayed_ref_updates +=
				btrfs_csum_bytes_to_leaves(trans->fs_info,
							   head_ref->num_bytes);
		}
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated, this
 *		can be either one of the well-known metadata trees or the
 *		subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref. This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system;
	int action = generic_ref->action;
	int level = generic_ref->tree_ref.level;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u8 ref_type;

	is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);

	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				generic_ref->tree_ref.owning_root, action,
				ref_type);
	ref->root = generic_ref->tree_ref.owning_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      generic_ref->tree_ref.owning_root, 0, action,
			      false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(trans, record);

	return 0;
}

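/*
 * Caller sketch for btrfs_add_delayed_tree_ref() above (a hypothetical,
 * abridged example; see the btrfs_init_*_ref() helpers in delayed-ref.h
 * for the exact signatures): callers describe the modification in a
 * struct btrfs_ref and then queue it, e.g.:
 *
 *	struct btrfs_ref ref = { 0 };
 *
 *	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
 *			       num_bytes, parent);
 *	btrfs_init_tree_ref(&ref, level, root_objectid, mod_root, false);
 *	ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
 */
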
/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int action = generic_ref->action;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root = generic_ref->data_ref.owning_root;
	u64 owner = generic_ref->data_ref.ino;
	u64 offset = generic_ref->data_ref.offset;
	u8 ref_type;

	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(trans, record);
	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
			      false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL);

	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);
	return 0;
}

/*
 * This does a simple search for the head node for a given extent. Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	lockdep_assert_held(&delayed_refs->lock);

	return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}