// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

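/*
 * Look up the delayed node attached to @btrfs_inode and take a reference on
 * it. The fast path reads the cached pointer without any lock; the slow path
 * looks it up in root->delayed_nodes_tree under root->inode_lock and uses
 * refcount_inc_not_zero() so we never resurrect a node that is concurrently
 * being torn down by __btrfs_release_delayed_node().
 */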
static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

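/*
 * Drop one reference on @delayed_node, after requeueing it (when it still has
 * pending items) or removing it from the global node/prepare lists. Dropping
 * the last reference deletes the node from the radix tree and frees it; the
 * refcount_inc_not_zero() dance in btrfs_get_delayed_node() relies on the
 * refcount never going back up once it has hit zero here.
 */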
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

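/*
 * A delayed item is a single allocation: the struct followed by @data_len
 * bytes of item payload (accessed through item->data and later copied into
 * the leaf verbatim). The caller gets the initial reference.
 */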
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root: the rb-tree of the delayed node to search
 * @key: the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
					   NULL, NULL);
}

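/*
 * Link @ins into the node's insertion or deletion tree, ordered by
 * btrfs_comp_cpu_keys(). Both trees are rb_root_cached, so the leftmost node
 * is tracked to keep __btrfs_first_delayed_*_item() cheap. On success this
 * also bumps delayed_node->count and the global delayed_root->items counter,
 * and advances index_cnt past any inserted dir index key.
 */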
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	int cmp;
	bool leftmost = true;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

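/*
 * Account one finished delayed item. items_seq is what could_end_wait() polls,
 * so throttled writers see progress in BTRFS_DELAYED_BATCH steps; the
 * waitqueue is only woken once the backlog drops below
 * BTRFS_DELAYED_BACKGROUND or a batch boundary is crossed.
 */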
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/* Not associated with any delayed_node */
	if (!delayed_item->delayed_node)
		return;
	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

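/*
 * Reserve metadata space for a delayed item by migrating bytes the
 * transaction already holds (trans->block_rsv) into the global
 * delayed_block_rsv. Nothing is reserved from scratch here, which is also
 * why no qgroup reservation is needed.
 */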
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from the transaction rsv, since we have
	 * already reserved space when starting a transaction. So there is no
	 * need to reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
					BTRFS_QGROUP_RSV_META_PREALLOC, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/* NO_FLUSH could only fail with -ENOSPC */
		ASSERT(ret == 0 || ret == -ENOSPC);
		if (ret)
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
	} else {
		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	}

	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      node->inode_id, num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node,
						 bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * Insert a single delayed item or a batch of delayed items that have consecutive
 * keys if they exist.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *first_item)
{
	LIST_HEAD(item_list);
	struct btrfs_delayed_item *curr;
	struct btrfs_delayed_item *next;
	const int max_size = BTRFS_LEAF_DATA_SIZE(root->fs_info);
	struct btrfs_item_batch batch;
	int total_size;
	char *ins_data = NULL;
	int ret;

	list_add_tail(&first_item->tree_list, &item_list);
	batch.total_data_size = first_item->data_len;
	batch.nr = 1;
	total_size = first_item->data_len + sizeof(struct btrfs_item);
	curr = first_item;

	while (true) {
		int next_size;

		next = __btrfs_next_delayed_item(curr);
		if (!next || !btrfs_is_continuous_delayed_item(curr, next))
			break;

		next_size = next->data_len + sizeof(struct btrfs_item);
		if (total_size + next_size > max_size)
			break;

		list_add_tail(&next->tree_list, &item_list);
		batch.nr++;
		total_size += next_size;
		batch.total_data_size += next->data_len;
		curr = next;
	}

	if (batch.nr == 1) {
		batch.keys = &first_item->key;
		batch.data_sizes = &first_item->data_len;
	} else {
		struct btrfs_key *ins_keys;
		u32 *ins_sizes;
		int i = 0;

		ins_data = kmalloc(batch.nr * sizeof(u32) +
				   batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
		if (!ins_data) {
			ret = -ENOMEM;
			goto out;
		}
		ins_sizes = (u32 *)ins_data;
		ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
		batch.keys = ins_keys;
		batch.data_sizes = ins_sizes;
		list_for_each_entry(curr, &item_list, tree_list) {
			ins_keys[i] = curr->key;
			ins_sizes[i] = curr->data_len;
			i++;
		}
	}

	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret)
		goto out;

	list_for_each_entry(curr, &item_list, tree_list) {
		char *data_ptr;

		data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
		write_extent_buffer(path->nodes[0], &curr->data,
				    (unsigned long)data_ptr, curr->data_len);
		path->slots[0]++;
	}

	/*
	 * Now release our path before releasing the delayed items and their
	 * metadata reservations, so that we don't block other tasks for more
	 * time than needed.
	 */
	btrfs_release_path(path);

	list_for_each_entry_safe(curr, next, &item_list, tree_list) {
		list_del(&curr->tree_list);
		btrfs_delayed_item_release_metadata(root, curr);
		btrfs_release_delayed_item(curr);
	}
out:
	kfree(ins_data);
	return ret;
}

static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	int ret = 0;

	while (ret == 0) {
		struct btrfs_delayed_item *curr;

		mutex_lock(&node->mutex);
		curr = __btrfs_first_delayed_insertion_item(node);
		if (!curr) {
			mutex_unlock(&node->mutex);
			break;
		}
		ret = btrfs_insert_delayed_item(trans, root, path, curr);
		mutex_unlock(&node->mutex);
	}

	return ret;
}

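/*
 * Starting at path->slots[0], delete in one btrfs_del_items() call all leaf
 * items that match the run of consecutive delayed deletion items beginning
 * at @item, then release those delayed items and their reservations.
 */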
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * Count the number of dir index items that we can delete in batch.
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
		struct btrfs_delayed_root *delayed_root;

		ASSERT(delayed_node->root);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

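/*
 * Write the delayed inode_item into its leaf and, when
 * BTRFS_DELAYED_NODE_DEL_IREF is set, also delete the inode's single
 * INODE_REF/INODE_EXTREF item (delayed iref deletion only happens for inodes
 * with one link). Any failure other than -ENOENT aborts the transaction,
 * since the metadata reservation has already been consumed by then.
 */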
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0)
		ret = -ENOENT;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto out;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	/*
	 * If we fail to update the delayed inode we need to abort the
	 * transaction, because we could leave the inode with the improper
	 * counts behind.
	 */
	if (ret && ret != -ENOENT)
		btrfs_abort_transaction(trans, ret);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (TRANS_ABORTED(trans))
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || nr--)) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

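/*
 * Work function run from the delayed_workers workqueue: repeatedly take a
 * prepared delayed node, join a transaction and flush the node's items,
 * until the backlog falls below BTRFS_DELAYED_BACKGROUND / 2 or the requested
 * number of nodes (async_work->nr, capped at BTRFS_DELAYED_WRITEBACK when
 * nr == 0) has been processed.
 */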
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
			NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

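/*
 * Throttle producers of delayed items. Below BTRFS_DELAYED_BACKGROUND items
 * (or while the workqueue is congested) nothing is done; above it, async
 * flushing of BTRFS_DELAYED_BATCH nodes is kicked off, and once the backlog
 * reaches BTRFS_DELAYED_WRITEBACK the caller also sleeps until
 * could_end_wait() sees the flusher make enough progress.
 */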
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001323void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
Miao Xie16cdcec2011-04-22 18:12:22 +08001324{
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001325 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
Miao Xie16cdcec2011-04-22 18:12:22 +08001326
Nikolay Borisov85777872017-10-23 13:51:49 +03001327 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1328 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
Miao Xie16cdcec2011-04-22 18:12:22 +08001329 return;
1330
1331 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
Miao Xie03538082013-12-26 13:07:03 +08001332 int seq;
Miao Xie16cdcec2011-04-22 18:12:22 +08001333 int ret;
Miao Xie03538082013-12-26 13:07:03 +08001334
1335 seq = atomic_read(&delayed_root->items_seq);
Chris Masonde3cb942013-03-04 17:13:31 -05001336
Daniel Dresslera585e942014-11-17 22:05:02 +09001337 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001338 if (ret)
1339 return;
1340
Miao Xie03538082013-12-26 13:07:03 +08001341 wait_event_interruptible(delayed_root->wait,
1342 could_end_wait(delayed_root, seq));
Miao Xie4dd466d2013-12-26 13:07:02 +08001343 return;
Miao Xie16cdcec2011-04-22 18:12:22 +08001344 }
1345
Daniel Dresslera585e942014-11-17 22:05:02 +09001346 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
Miao Xie16cdcec2011-04-22 18:12:22 +08001347}
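/*
 * A hedged sketch of the intended use of the helper above (the call
 * site shown here is an assumption, not part of this file): callers
 * that may have queued delayed items invoke it so that flushing runs
 * asynchronously, and only writers that push the backlog past
 * BTRFS_DELAYED_WRITEBACK block until a batch completes:
 *
 *	ret = btrfs_end_transaction(trans);
 *	btrfs_balance_delayed_items(fs_info);
 */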
1348
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001349/* Will return 0 or -ENOMEM */
Miao Xie16cdcec2011-04-22 18:12:22 +08001350int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001351 const char *name, int name_len,
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001352 struct btrfs_inode *dir,
Miao Xie16cdcec2011-04-22 18:12:22 +08001353 struct btrfs_disk_key *disk_key, u8 type,
1354 u64 index)
1355{
1356 struct btrfs_delayed_node *delayed_node;
1357 struct btrfs_delayed_item *delayed_item;
1358 struct btrfs_dir_item *dir_item;
1359 int ret;
1360
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001361 delayed_node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001362 if (IS_ERR(delayed_node))
1363 return PTR_ERR(delayed_node);
1364
1365 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1366 if (!delayed_item) {
1367 ret = -ENOMEM;
1368 goto release_node;
1369 }
1370
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001371 delayed_item->key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001372 delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001373 delayed_item->key.offset = index;
1374
1375 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1376 dir_item->location = *disk_key;
Qu Wenruo3cae2102013-07-16 11:19:18 +08001377 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1378 btrfs_set_stack_dir_data_len(dir_item, 0);
1379 btrfs_set_stack_dir_name_len(dir_item, name_len);
1380 btrfs_set_stack_dir_type(dir_item, type);
Miao Xie16cdcec2011-04-22 18:12:22 +08001381 memcpy((char *)(dir_item + 1), name, name_len);
1382
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001383 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
Josef Bacik8c2a3ca2012-01-10 10:31:31 -05001384 /*
1385	 * We reserved enough space when we started the transaction, so a
1386	 * metadata reservation failure here should be impossible.
1387 */
1388 BUG_ON(ret);
1389
Miao Xie16cdcec2011-04-22 18:12:22 +08001390 mutex_lock(&delayed_node->mutex);
1391 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1392 if (unlikely(ret)) {
Lu Fengqi4465c8b2018-08-01 11:32:25 +08001393 btrfs_err(trans->fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001394	"failed to add delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09001395 name_len, name, delayed_node->root->root_key.objectid,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001396 delayed_node->inode_id, ret);
Miao Xie16cdcec2011-04-22 18:12:22 +08001397 BUG();
1398 }
1399 mutex_unlock(&delayed_node->mutex);
1400
1401release_node:
1402 btrfs_release_delayed_node(delayed_node);
1403 return ret;
1404}
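/*
 * A minimal caller sketch for the insertion path above (hypothetical;
 * the index and key setup helpers are assumptions from elsewhere in
 * btrfs): defer the DIR_INDEX insertion for a newly linked file to the
 * delayed node instead of touching the b-tree:
 *
 *	struct btrfs_disk_key disk_key;
 *	u64 index;
 *	int ret;
 *
 *	ret = btrfs_set_inode_index(dir, &index);    // next free index
 *	if (!ret) {
 *		btrfs_cpu_key_to_disk(&disk_key, &new_inode_key);
 *		ret = btrfs_insert_delayed_dir_index(trans, name, name_len,
 *						     dir, &disk_key,
 *						     BTRFS_FT_REG_FILE, index);
 *	}
 */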
1405
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001406static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
Miao Xie16cdcec2011-04-22 18:12:22 +08001407 struct btrfs_delayed_node *node,
1408 struct btrfs_key *key)
1409{
1410 struct btrfs_delayed_item *item;
1411
1412 mutex_lock(&node->mutex);
1413 item = __btrfs_lookup_delayed_insertion_item(node, key);
1414 if (!item) {
1415 mutex_unlock(&node->mutex);
1416 return 1;
1417 }
1418
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001419 btrfs_delayed_item_release_metadata(node->root, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001420 btrfs_release_delayed_item(item);
1421 mutex_unlock(&node->mutex);
1422 return 0;
1423}
1424
1425int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001426 struct btrfs_inode *dir, u64 index)
Miao Xie16cdcec2011-04-22 18:12:22 +08001427{
1428 struct btrfs_delayed_node *node;
1429 struct btrfs_delayed_item *item;
1430 struct btrfs_key item_key;
1431 int ret;
1432
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001433 node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001434 if (IS_ERR(node))
1435 return PTR_ERR(node);
1436
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001437 item_key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001438 item_key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001439 item_key.offset = index;
1440
Lu Fengqi9add2942018-08-01 11:32:26 +08001441 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1442 &item_key);
Miao Xie16cdcec2011-04-22 18:12:22 +08001443 if (!ret)
1444 goto end;
1445
1446 item = btrfs_alloc_delayed_item(0);
1447 if (!item) {
1448 ret = -ENOMEM;
1449 goto end;
1450 }
1451
1452 item->key = item_key;
1453
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001454 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001455 /*
1456	 * We reserved enough space when we started the transaction, so a
1457	 * metadata reservation failure here should be impossible.
1458 */
Qu Wenruo933c22a2019-07-16 17:00:32 +08001459 if (ret < 0) {
1460 btrfs_err(trans->fs_info,
1461"metadata reservation failed for delayed dir item deletion, should have been reserved");
1462 btrfs_release_delayed_item(item);
1463 goto end;
1464 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001465
1466 mutex_lock(&node->mutex);
1467 ret = __btrfs_add_delayed_deletion_item(node, item);
1468 if (unlikely(ret)) {
Lu Fengqi9add2942018-08-01 11:32:26 +08001469 btrfs_err(trans->fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001470	"failed to add delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09001471 index, node->root->root_key.objectid,
1472 node->inode_id, ret);
Qu Wenruo933c22a2019-07-16 17:00:32 +08001473 btrfs_delayed_item_release_metadata(dir->root, item);
1474 btrfs_release_delayed_item(item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001475 }
1476 mutex_unlock(&node->mutex);
1477end:
1478 btrfs_release_delayed_node(node);
1479 return ret;
1480}
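/*
 * The matching unlink-side sketch (hypothetical caller): queue the
 * removal of a directory entry by index; if the corresponding
 * insertion is still pending in the delayed node, the two cancel out
 * and no deletion item is ever queued:
 *
 *	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
 */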
1481
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001482int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001483{
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001484 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001485
1486 if (!delayed_node)
1487 return -ENOENT;
1488
1489 /*
1490	 * Since we hold the i_mutex of this directory, no new directory
1491	 * index can be added to the delayed node and index_cnt cannot be
1492	 * updated right now, so we need not lock the delayed node.
1493 */
Miao Xie2f7e33d2011-06-23 07:27:13 +00001494 if (!delayed_node->index_cnt) {
1495 btrfs_release_delayed_node(delayed_node);
Miao Xie16cdcec2011-04-22 18:12:22 +08001496 return -EINVAL;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001497 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001498
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001499 inode->index_cnt = delayed_node->index_cnt;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001500 btrfs_release_delayed_node(delayed_node);
1501 return 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001502}
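/*
 * Usage sketch (hedged; the fallback helper named here is an
 * assumption): callers that hand out new directory indexes can seed
 * index_cnt cheaply from the delayed node and only walk the tree when
 * nothing is cached:
 *
 *	if (btrfs_inode_delayed_dir_index_count(dir))
 *		ret = btrfs_set_inode_index_count(dir);  // tree walk
 */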
1503
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001504bool btrfs_readdir_get_delayed_items(struct inode *inode,
1505 struct list_head *ins_list,
1506 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001507{
1508 struct btrfs_delayed_node *delayed_node;
1509 struct btrfs_delayed_item *item;
1510
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001511 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001512 if (!delayed_node)
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001513 return false;
1514
1515 /*
1516	 * We can only do one readdir with delayed items at a time because of
1517	 * item->readdir_list, so upgrade the shared lock taken by the caller
1518	 * to an exclusive one.
1518 */
Josef Bacik64708532021-02-10 17:14:34 -05001519 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
1520 btrfs_inode_lock(inode, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001521
1522 mutex_lock(&delayed_node->mutex);
1523 item = __btrfs_first_delayed_insertion_item(delayed_node);
1524 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001525 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001526 list_add_tail(&item->readdir_list, ins_list);
1527 item = __btrfs_next_delayed_item(item);
1528 }
1529
1530 item = __btrfs_first_delayed_deletion_item(delayed_node);
1531 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001532 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001533 list_add_tail(&item->readdir_list, del_list);
1534 item = __btrfs_next_delayed_item(item);
1535 }
1536 mutex_unlock(&delayed_node->mutex);
1537 /*
1538	 * This delayed node is still cached in the btrfs inode, so refs
1539	 * must be > 1 now, and we need not check whether it is about to be
1540	 * freed.
1541	 *
1542	 * Besides that, this function is only used to read a directory, and
1543	 * no delayed items are inserted or deleted during that time, so we
1544	 * also need not requeue or dequeue this delayed node.
1545 */
Elena Reshetova6de5f182017-03-03 10:55:16 +02001546 refcount_dec(&delayed_node->refs);
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001547
1548 return true;
Miao Xie16cdcec2011-04-22 18:12:22 +08001549}
1550
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001551void btrfs_readdir_put_delayed_items(struct inode *inode,
1552 struct list_head *ins_list,
1553 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001554{
1555 struct btrfs_delayed_item *curr, *next;
1556
1557 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1558 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001559 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001560 kfree(curr);
1561 }
1562
1563 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1564 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001565 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001566 kfree(curr);
1567 }
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001568
1569 /*
1570 * The VFS is going to do up_read(), so we need to downgrade back to a
1571 * read lock.
1572 */
1573 downgrade_write(&inode->i_rwsem);
Miao Xie16cdcec2011-04-22 18:12:22 +08001574}
1575
1576int btrfs_should_delete_dir_index(struct list_head *del_list,
1577 u64 index)
1578{
Josef Bacike4fd4932018-01-23 15:17:05 -05001579 struct btrfs_delayed_item *curr;
1580 int ret = 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001581
Josef Bacike4fd4932018-01-23 15:17:05 -05001582 list_for_each_entry(curr, del_list, readdir_list) {
Miao Xie16cdcec2011-04-22 18:12:22 +08001583 if (curr->key.offset > index)
1584 break;
Josef Bacike4fd4932018-01-23 15:17:05 -05001585 if (curr->key.offset == index) {
1586 ret = 1;
1587 break;
1588 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001589 }
Josef Bacike4fd4932018-01-23 15:17:05 -05001590 return ret;
Miao Xie16cdcec2011-04-22 18:12:22 +08001591}
1592
1593/*
1594 * btrfs_readdir_delayed_dir_index - read dir info stored in the
1595 * delayed tree.
1596 */
Al Viro9cdda8d2013-05-22 16:48:09 -04001597int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
Jeff Mahoneyd2fbb2b2016-11-05 13:26:35 -04001598 struct list_head *ins_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001599{
1600 struct btrfs_dir_item *di;
1601 struct btrfs_delayed_item *curr, *next;
1602 struct btrfs_key location;
1603 char *name;
1604 int name_len;
1605 int over = 0;
1606 unsigned char d_type;
1607
1608 if (list_empty(ins_list))
1609 return 0;
1610
1611 /*
1612	 * The data of a delayed item never changes once it is queued, so we
1613	 * need not lock the items.  And since we hold the i_mutex of the
1614	 * directory, nobody can delete any directory index now.
1615 */
1616 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1617 list_del(&curr->readdir_list);
1618
Al Viro9cdda8d2013-05-22 16:48:09 -04001619 if (curr->key.offset < ctx->pos) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001620 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001621 kfree(curr);
1622 continue;
1623 }
1624
Al Viro9cdda8d2013-05-22 16:48:09 -04001625 ctx->pos = curr->key.offset;
Miao Xie16cdcec2011-04-22 18:12:22 +08001626
1627 di = (struct btrfs_dir_item *)curr->data;
1628 name = (char *)(di + 1);
Qu Wenruo3cae2102013-07-16 11:19:18 +08001629 name_len = btrfs_stack_dir_name_len(di);
Miao Xie16cdcec2011-04-22 18:12:22 +08001630
Phillip Potter7d157c32019-03-26 21:39:34 +00001631 d_type = fs_ftype_to_dtype(di->type);
Miao Xie16cdcec2011-04-22 18:12:22 +08001632 btrfs_disk_key_to_cpu(&location, &di->location);
1633
Al Viro9cdda8d2013-05-22 16:48:09 -04001634 over = !dir_emit(ctx, name, name_len,
Miao Xie16cdcec2011-04-22 18:12:22 +08001635 location.objectid, d_type);
1636
Elena Reshetova089e77e2017-03-03 10:55:17 +02001637 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001638 kfree(curr);
1639
1640 if (over)
1641 return 1;
Josef Bacik42e9cc42017-07-24 15:14:26 -04001642 ctx->pos++;
Miao Xie16cdcec2011-04-22 18:12:22 +08001643 }
1644 return 0;
1645}
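/*
 * How the readdir helpers above fit together, as a hedged sketch of a
 * caller (error handling omitted; not the verbatim btrfs readdir):
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *	bool put;
 *
 *	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
 *	// ... emit on-disk DIR_INDEX items, skipping every index for
 *	// which btrfs_should_delete_dir_index(&del_list, index) returns 1
 *	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
 *	if (put)
 *		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
 */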
1646
Miao Xie16cdcec2011-04-22 18:12:22 +08001647static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1648 struct btrfs_inode_item *inode_item,
1649 struct inode *inode)
1650{
Boris Burkov77eea052021-06-30 13:01:48 -07001651 u64 flags;
1652
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001653 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1654 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001655 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1656 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1657 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1658 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1659 btrfs_set_stack_inode_generation(inode_item,
1660 BTRFS_I(inode)->generation);
Jeff Laytonc7f88c42017-12-11 06:35:12 -05001661 btrfs_set_stack_inode_sequence(inode_item,
1662 inode_peek_iversion(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001663 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1664 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
Boris Burkov77eea052021-06-30 13:01:48 -07001665 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1666 BTRFS_I(inode)->ro_flags);
1667 btrfs_set_stack_inode_flags(inode_item, flags);
Chris Masonff5714c2011-05-28 07:00:39 -04001668 btrfs_set_stack_inode_block_group(inode_item, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001669
David Sterbaa937b972014-12-12 17:39:12 +01001670 btrfs_set_stack_timespec_sec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001671 inode->i_atime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001672 btrfs_set_stack_timespec_nsec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001673 inode->i_atime.tv_nsec);
1674
David Sterbaa937b972014-12-12 17:39:12 +01001675 btrfs_set_stack_timespec_sec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001676 inode->i_mtime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001677 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001678 inode->i_mtime.tv_nsec);
1679
David Sterbaa937b972014-12-12 17:39:12 +01001680 btrfs_set_stack_timespec_sec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001681 inode->i_ctime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001682 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001683 inode->i_ctime.tv_nsec);
chandan r9cc97d62012-07-04 12:48:07 +05301684
1685 btrfs_set_stack_timespec_sec(&inode_item->otime,
1686 BTRFS_I(inode)->i_otime.tv_sec);
1687 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1688 BTRFS_I(inode)->i_otime.tv_nsec);
Miao Xie16cdcec2011-04-22 18:12:22 +08001689}
1690
Miao Xie2f7e33d2011-06-23 07:27:13 +00001691int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1692{
Josef Bacik9ddc9592020-01-17 09:02:22 -05001693 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001694 struct btrfs_delayed_node *delayed_node;
1695 struct btrfs_inode_item *inode_item;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001696
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001697 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001698 if (!delayed_node)
1699 return -ENOENT;
1700
1701 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001702 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Miao Xie2f7e33d2011-06-23 07:27:13 +00001703 mutex_unlock(&delayed_node->mutex);
1704 btrfs_release_delayed_node(delayed_node);
1705 return -ENOENT;
1706 }
1707
1708 inode_item = &delayed_node->inode_item;
1709
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001710 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1711 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
Nikolay Borisov6ef06d22017-02-20 13:50:34 +02001712 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
Josef Bacik9ddc9592020-01-17 09:02:22 -05001713 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1714 round_up(i_size_read(inode), fs_info->sectorsize));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001715 inode->i_mode = btrfs_stack_inode_mode(inode_item);
Miklos Szeredibfe86842011-10-28 14:13:29 +02001716 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001717 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1718 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
Yang Dongsheng6e17d302015-04-09 12:08:43 +08001719 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1720
Jeff Laytonc7f88c42017-12-11 06:35:12 -05001721 inode_set_iversion_queried(inode,
1722 btrfs_stack_inode_sequence(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001723 inode->i_rdev = 0;
1724 *rdev = btrfs_stack_inode_rdev(inode_item);
Boris Burkov77eea052021-06-30 13:01:48 -07001725 btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1726 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001727
David Sterbaa937b972014-12-12 17:39:12 +01001728 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1729 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001730
David Sterbaa937b972014-12-12 17:39:12 +01001731 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1732 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001733
David Sterbaa937b972014-12-12 17:39:12 +01001734 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1735 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001736
chandan r9cc97d62012-07-04 12:48:07 +05301737 BTRFS_I(inode)->i_otime.tv_sec =
1738 btrfs_stack_timespec_sec(&inode_item->otime);
1739 BTRFS_I(inode)->i_otime.tv_nsec =
1740 btrfs_stack_timespec_nsec(&inode_item->otime);
1741
Miao Xie2f7e33d2011-06-23 07:27:13 +00001742 inode->i_generation = BTRFS_I(inode)->generation;
1743 BTRFS_I(inode)->index_cnt = (u64)-1;
1744
1745 mutex_unlock(&delayed_node->mutex);
1746 btrfs_release_delayed_node(delayed_node);
1747 return 0;
1748}
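/*
 * Note on the pairing above: btrfs_fill_inode() is the read-side
 * inverse of fill_stack_inode_item().  A hedged lookup sketch (the
 * fallback helper is an assumption, not defined in this file):
 *
 *	u32 rdev;
 *
 *	if (btrfs_fill_inode(inode, &rdev))      // -ENOENT: nothing cached
 *		read_inode_item_from_btree(inode, &rdev);  // hypothetical
 */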
1749
Miao Xie16cdcec2011-04-22 18:12:22 +08001750int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
Nikolay Borisovf3fbcae2020-11-02 16:48:57 +02001751 struct btrfs_root *root,
1752 struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001753{
1754 struct btrfs_delayed_node *delayed_node;
David Sterbaaa0467d2011-06-03 16:29:08 +02001755 int ret = 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001756
Nikolay Borisovf3fbcae2020-11-02 16:48:57 +02001757 delayed_node = btrfs_get_or_create_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001758 if (IS_ERR(delayed_node))
1759 return PTR_ERR(delayed_node);
1760
1761 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001762 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Nikolay Borisovf3fbcae2020-11-02 16:48:57 +02001763 fill_stack_inode_item(trans, &delayed_node->inode_item,
1764 &inode->vfs_inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001765 goto release_node;
1766 }
1767
Nikolay Borisov8e3c9d32021-02-22 18:40:46 +02001768 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
Josef Bacikc06a0e12011-11-04 19:56:02 -04001769 if (ret)
1770 goto release_node;
Miao Xie16cdcec2011-04-22 18:12:22 +08001771
Nikolay Borisovf3fbcae2020-11-02 16:48:57 +02001772 fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
Miao Xie7cf35d92013-12-26 13:07:05 +08001773 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
Miao Xie16cdcec2011-04-22 18:12:22 +08001774 delayed_node->count++;
1775 atomic_inc(&root->fs_info->delayed_root->items);
1776release_node:
1777 mutex_unlock(&delayed_node->mutex);
1778 btrfs_release_delayed_node(delayed_node);
1779 return ret;
1780}
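/*
 * Hypothetical caller sketch: an update path can try the delayed route
 * first and fall back to a direct b-tree update only when the metadata
 * reservation fails (the fallback helper is an assumption):
 *
 *	ret = btrfs_delayed_update_inode(trans, root, inode);
 *	if (ret == -ENOSPC || ret == -ENOMEM)
 *		ret = btrfs_update_inode_item(trans, root, inode);
 */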
1781
Nikolay Borisove07222c2017-01-10 20:35:37 +02001782int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
Miao Xie67de1172013-12-26 13:07:06 +08001783{
David Sterba3ffbd682018-06-29 10:56:42 +02001784 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Miao Xie67de1172013-12-26 13:07:06 +08001785 struct btrfs_delayed_node *delayed_node;
1786
Chris Mason6f896052014-12-31 12:18:29 -05001787 /*
1788	 * We don't do delayed inode updates during log recovery because it
1789	 * leads to ENOSPC problems.  This means we also can't do delayed
1790	 * inode refs.
1791 */
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001792 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
Chris Mason6f896052014-12-31 12:18:29 -05001793 return -EAGAIN;
1794
Nikolay Borisove07222c2017-01-10 20:35:37 +02001795 delayed_node = btrfs_get_or_create_delayed_node(inode);
Miao Xie67de1172013-12-26 13:07:06 +08001796 if (IS_ERR(delayed_node))
1797 return PTR_ERR(delayed_node);
1798
1799 /*
1800	 * We don't reserve space for the inode ref deletion because:
1801	 * - We ONLY do async inode ref deletion for an inode that has only
1802	 *   one link (i_nlink == 1), which means there is only one inode ref.
1803	 *   In most cases the inode ref and the inode item are in the same
1804	 *   leaf, and we will deal with them at the same time.  Since we are
1805	 *   sure we will reserve space for the inode item, it is unnecessary
1806	 *   to also reserve space for the inode ref deletion.
1807	 * - If the inode ref and the inode item are not in the same leaf, we
1808	 *   still need not worry about ENOSPC, because we reserve much more
1809	 *   space for the inode update than it needs.
1810	 * - At worst, we can steal some space from the global reservation.
1811	 *   That is very rare.
1812 */
1813 mutex_lock(&delayed_node->mutex);
1814 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1815 goto release_node;
1816
1817 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1818 delayed_node->count++;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001819 atomic_inc(&fs_info->delayed_root->items);
Miao Xie67de1172013-12-26 13:07:06 +08001820release_node:
1821 mutex_unlock(&delayed_node->mutex);
1822 btrfs_release_delayed_node(delayed_node);
1823 return 0;
1824}
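/*
 * Sketch of the expected unlink-path usage, following the -EAGAIN
 * contract above (hedged; the synchronous fallback and its arguments
 * are assumptions):
 *
 *	ret = btrfs_delayed_delete_inode_ref(inode);
 *	if (ret == -EAGAIN)
 *		ret = btrfs_del_inode_ref(trans, root, name, name_len,
 *					  ino, dir_ino, &index);
 */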
1825
Miao Xie16cdcec2011-04-22 18:12:22 +08001826static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1827{
1828 struct btrfs_root *root = delayed_node->root;
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001829 struct btrfs_fs_info *fs_info = root->fs_info;
Miao Xie16cdcec2011-04-22 18:12:22 +08001830 struct btrfs_delayed_item *curr_item, *prev_item;
1831
1832 mutex_lock(&delayed_node->mutex);
1833 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1834 while (curr_item) {
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001835 btrfs_delayed_item_release_metadata(root, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001836 prev_item = curr_item;
1837 curr_item = __btrfs_next_delayed_item(prev_item);
1838 btrfs_release_delayed_item(prev_item);
1839 }
1840
1841 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1842 while (curr_item) {
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001843 btrfs_delayed_item_release_metadata(root, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001844 prev_item = curr_item;
1845 curr_item = __btrfs_next_delayed_item(prev_item);
1846 btrfs_release_delayed_item(prev_item);
1847 }
1848
Josef Bacika4cb90d2021-05-21 16:44:07 -04001849 btrfs_release_delayed_iref(delayed_node);
Miao Xie67de1172013-12-26 13:07:06 +08001850
Miao Xie7cf35d92013-12-26 13:07:05 +08001851 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001852 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
Miao Xie16cdcec2011-04-22 18:12:22 +08001853 btrfs_release_delayed_inode(delayed_node);
1854 }
1855 mutex_unlock(&delayed_node->mutex);
1856}
1857
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001858void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001859{
1860 struct btrfs_delayed_node *delayed_node;
1861
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001862 delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001863 if (!delayed_node)
1864 return;
1865
1866 __btrfs_kill_delayed_node(delayed_node);
1867 btrfs_release_delayed_node(delayed_node);
1868}
1869
1870void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1871{
1872 u64 inode_id = 0;
1873 struct btrfs_delayed_node *delayed_nodes[8];
1874 int i, n;
1875
1876 while (1) {
1877 spin_lock(&root->inode_lock);
1878 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1879 (void **)delayed_nodes, inode_id,
1880 ARRAY_SIZE(delayed_nodes));
1881 if (!n) {
1882 spin_unlock(&root->inode_lock);
1883 break;
1884 }
1885
1886 inode_id = delayed_nodes[n - 1]->inode_id + 1;
Josef Bacikbaf320b2019-09-26 08:29:32 -04001887 for (i = 0; i < n; i++) {
1888 /*
1889 * Don't increase refs in case the node is dead and
1890 * about to be removed from the tree in the loop below
1891 */
1892 if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
1893 delayed_nodes[i] = NULL;
1894 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001895 spin_unlock(&root->inode_lock);
1896
1897 for (i = 0; i < n; i++) {
Josef Bacikbaf320b2019-09-26 08:29:32 -04001898 if (!delayed_nodes[i])
1899 continue;
Miao Xie16cdcec2011-04-22 18:12:22 +08001900 __btrfs_kill_delayed_node(delayed_nodes[i]);
1901 btrfs_release_delayed_node(delayed_nodes[i]);
1902 }
1903 }
1904}
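/*
 * The loop above drains the radix tree in gangs of 8, restarting each
 * pass at the last inode_id + 1 so concurrent inserts cannot make it
 * miss or revisit nodes.  The bare pattern, as a generic sketch:
 *
 *	void *batch[8];
 *	unsigned long first = 0;
 *	int i, n;
 *
 *	while ((n = radix_tree_gang_lookup(&tree, batch, first,
 *					   ARRAY_SIZE(batch))) > 0) {
 *		for (i = 0; i < n; i++)
 *			process(batch[i]);               // hypothetical
 *		first = index_of(batch[n - 1]) + 1;      // hypothetical
 *	}
 */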
Miao Xie67cde342012-06-14 02:23:22 -06001905
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001906void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
Miao Xie67cde342012-06-14 02:23:22 -06001907{
Miao Xie67cde342012-06-14 02:23:22 -06001908 struct btrfs_delayed_node *curr_node, *prev_node;
1909
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001910 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
Miao Xie67cde342012-06-14 02:23:22 -06001911 while (curr_node) {
1912 __btrfs_kill_delayed_node(curr_node);
1913
1914 prev_node = curr_node;
1915 curr_node = btrfs_next_delayed_node(curr_node);
1916 btrfs_release_delayed_node(prev_node);
1917 }
1918}
1919