// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

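/*
 * Two delayed items are "continuous" when they are dir index items of the
 * same directory inode with consecutive index offsets, which lets the
 * insertion and deletion paths batch them into a single leaf operation.
 */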
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

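/*
 * Drop one reference on a delayed node. While still referenced the node is
 * re-queued (or dequeued when it has no pending items); once the refcount
 * reaches zero the node is removed from the radix tree and freed.
 */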
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key: the key to look up
 * @prev: used to store the prev item if the right item isn't found
 * @next: used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
					   NULL, NULL);
}

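/*
 * Link a delayed item into the insertion or deletion rb-tree of its delayed
 * node, keyed by the item key. For dir index insertions the node's index_cnt
 * is advanced past the new index so later index allocations stay unique.
 */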
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	int cmp;
	bool leftmost = true;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

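/*
 * Account one finished delayed item and wake up any waiter in
 * btrfs_balance_delayed_items() once a batch has been processed or the
 * backlog drops below the background threshold.
 */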
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/* Not associated with any delayed_node */
	if (!delayed_item->delayed_node)
		return;
	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

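/*
 * Reserve metadata space for one delayed item by migrating it from the
 * transaction's block reserve into the fs-wide delayed_block_rsv.
 */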
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/*
	 * Here we migrate space rsv from transaction rsv, since have already
	 * reserved space when starting a transaction. So no need to reserve
	 * qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
					BTRFS_QGROUP_RSV_META_PREALLOC, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/* NO_FLUSH could only fail with -ENOSPC */
		ASSERT(ret == 0 || ret == -ENOSPC);
		if (ret)
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
	} else {
		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	}

	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      node->inode_id, num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						 struct btrfs_delayed_node *node,
						 bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	char *ptr;
	int ret;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

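/*
 * Delete, in one btrfs_del_items() call, the run of consecutive dir index
 * items in the current leaf that match the pending delayed deletion items,
 * releasing their reservations as they are dropped.
 */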
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	unsigned int nofs_flag;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

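/*
 * Copy the in-memory inode item of a dirty delayed node into the matching
 * INODE_ITEM in the fs tree and, when the DEL_IREF flag is set, also delete
 * the inode's INODE_REF/INODE_EXTREF item. The node's reservation is
 * released on the way out.
 */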
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	memalloc_nofs_restore(nofs_flag);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (TRANS_ABORTED(trans))
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || nr--)) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}

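/*
 * Flush all delayed items (dir index insertions/deletions and the inode item
 * update) of a single inode within the given transaction handle.
 */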
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

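/*
 * Worker callback: flush prepared delayed nodes in the background until the
 * item backlog drops below half of BTRFS_DELAYED_BACKGROUND or the requested
 * number of nodes has been handled.
 */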
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	do {
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND / 2)
			break;

		delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
		if (!delayed_node)
			break;

		root = delayed_node->root;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_release_path(path);
			btrfs_release_prepared_delayed_node(delayed_node);
			total_done++;
			continue;
		}

		block_rsv = trans->block_rsv;
		trans->block_rsv = &root->fs_info->delayed_block_rsv;

		__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

		trans->block_rsv = block_rsv;
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty_nodelay(root->fs_info);

		btrfs_release_path(path);
		btrfs_release_prepared_delayed_node(delayed_node);
		total_done++;

	} while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
		 || total_done < async_work->nr);

	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}


static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
			NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

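/*
 * Throttle callers when too many delayed items have accumulated: kick the
 * async worker and, above the writeback threshold, wait until a batch of
 * items has been processed before letting the caller continue.
 */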
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001380void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
Miao Xie16cdcec2011-04-22 18:12:22 +08001381{
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001382 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
Miao Xie16cdcec2011-04-22 18:12:22 +08001383
Nikolay Borisov85777872017-10-23 13:51:49 +03001384 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1385 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
Miao Xie16cdcec2011-04-22 18:12:22 +08001386 return;
1387
1388 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
Miao Xie03538082013-12-26 13:07:03 +08001389 int seq;
Miao Xie16cdcec2011-04-22 18:12:22 +08001390 int ret;
Miao Xie03538082013-12-26 13:07:03 +08001391
1392 seq = atomic_read(&delayed_root->items_seq);
Chris Masonde3cb942013-03-04 17:13:31 -05001393
Daniel Dresslera585e942014-11-17 22:05:02 +09001394 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001395 if (ret)
1396 return;
1397
Miao Xie03538082013-12-26 13:07:03 +08001398 wait_event_interruptible(delayed_root->wait,
1399 could_end_wait(delayed_root, seq));
Miao Xie4dd466d2013-12-26 13:07:02 +08001400 return;
Miao Xie16cdcec2011-04-22 18:12:22 +08001401 }
1402
Daniel Dresslera585e942014-11-17 22:05:02 +09001403 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
Miao Xie16cdcec2011-04-22 18:12:22 +08001404}
1405
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001406/* Will return 0 or -ENOMEM */
Miao Xie16cdcec2011-04-22 18:12:22 +08001407int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001408 const char *name, int name_len,
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001409 struct btrfs_inode *dir,
Miao Xie16cdcec2011-04-22 18:12:22 +08001410 struct btrfs_disk_key *disk_key, u8 type,
1411 u64 index)
1412{
1413 struct btrfs_delayed_node *delayed_node;
1414 struct btrfs_delayed_item *delayed_item;
1415 struct btrfs_dir_item *dir_item;
1416 int ret;
1417
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001418 delayed_node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001419 if (IS_ERR(delayed_node))
1420 return PTR_ERR(delayed_node);
1421
1422 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1423 if (!delayed_item) {
1424 ret = -ENOMEM;
1425 goto release_node;
1426 }
1427
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001428 delayed_item->key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001429 delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001430 delayed_item->key.offset = index;
1431
1432 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1433 dir_item->location = *disk_key;
Qu Wenruo3cae2102013-07-16 11:19:18 +08001434 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1435 btrfs_set_stack_dir_data_len(dir_item, 0);
1436 btrfs_set_stack_dir_name_len(dir_item, name_len);
1437 btrfs_set_stack_dir_type(dir_item, type);
Miao Xie16cdcec2011-04-22 18:12:22 +08001438 memcpy((char *)(dir_item + 1), name, name_len);
1439
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001440 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
Josef Bacik8c2a3ca2012-01-10 10:31:31 -05001441 /*
1442	 * We have reserved enough space when we started a new transaction,
1443	 * so a metadata reservation failure here is impossible.
1444 */
1445 BUG_ON(ret);
1446
Miao Xie16cdcec2011-04-22 18:12:22 +08001447 mutex_lock(&delayed_node->mutex);
1448 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1449 if (unlikely(ret)) {
Lu Fengqi4465c8b2018-08-01 11:32:25 +08001450 btrfs_err(trans->fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001451 "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09001452 name_len, name, delayed_node->root->root_key.objectid,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001453 delayed_node->inode_id, ret);
Miao Xie16cdcec2011-04-22 18:12:22 +08001454 BUG();
1455 }
1456 mutex_unlock(&delayed_node->mutex);
1457
1458release_node:
1459 btrfs_release_delayed_node(delayed_node);
1460 return ret;
1461}
1462
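/*
 * Try to cancel a delayed insertion that matches @key before it ever
 * reaches the btree.  Returns 0 if a pending insertion was found and
 * dropped, 1 if there was nothing to cancel.
 */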
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001463static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
Miao Xie16cdcec2011-04-22 18:12:22 +08001464 struct btrfs_delayed_node *node,
1465 struct btrfs_key *key)
1466{
1467 struct btrfs_delayed_item *item;
1468
1469 mutex_lock(&node->mutex);
1470 item = __btrfs_lookup_delayed_insertion_item(node, key);
1471 if (!item) {
1472 mutex_unlock(&node->mutex);
1473 return 1;
1474 }
1475
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001476 btrfs_delayed_item_release_metadata(node->root, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001477 btrfs_release_delayed_item(item);
1478 mutex_unlock(&node->mutex);
1479 return 0;
1480}
1481
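/*
 * Delete the directory index @index of @dir through the delayed-items
 * machinery: either cancel a pending insertion with the same key, or
 * queue a delayed deletion item on the directory's delayed node.
 */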
1482int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001483 struct btrfs_inode *dir, u64 index)
Miao Xie16cdcec2011-04-22 18:12:22 +08001484{
1485 struct btrfs_delayed_node *node;
1486 struct btrfs_delayed_item *item;
1487 struct btrfs_key item_key;
1488 int ret;
1489
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001490 node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001491 if (IS_ERR(node))
1492 return PTR_ERR(node);
1493
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001494 item_key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001495 item_key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001496 item_key.offset = index;
1497
Lu Fengqi9add2942018-08-01 11:32:26 +08001498 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1499 &item_key);
Miao Xie16cdcec2011-04-22 18:12:22 +08001500 if (!ret)
1501 goto end;
1502
1503 item = btrfs_alloc_delayed_item(0);
1504 if (!item) {
1505 ret = -ENOMEM;
1506 goto end;
1507 }
1508
1509 item->key = item_key;
1510
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001511 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001512 /*
1513	 * We have reserved enough space when we started a new transaction,
1514	 * so a metadata reservation failure here is impossible.
1515 */
Qu Wenruo933c22a2019-07-16 17:00:32 +08001516 if (ret < 0) {
1517 btrfs_err(trans->fs_info,
1518"metadata reservation failed for delayed dir item deltiona, should have been reserved");
1519 btrfs_release_delayed_item(item);
1520 goto end;
1521 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001522
1523 mutex_lock(&node->mutex);
1524 ret = __btrfs_add_delayed_deletion_item(node, item);
1525 if (unlikely(ret)) {
Lu Fengqi9add2942018-08-01 11:32:26 +08001526 btrfs_err(trans->fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001527 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09001528 index, node->root->root_key.objectid,
1529 node->inode_id, ret);
Qu Wenruo933c22a2019-07-16 17:00:32 +08001530 btrfs_delayed_item_release_metadata(dir->root, item);
1531 btrfs_release_delayed_item(item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001532 }
1533 mutex_unlock(&node->mutex);
1534end:
1535 btrfs_release_delayed_node(node);
1536 return ret;
1537}
1538
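/*
 * Copy the directory index counter cached in the delayed node into the
 * in-memory inode.  Returns -ENOENT if the directory has no delayed node
 * and -EINVAL if the delayed node carries no index_cnt yet.
 */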
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001539int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001540{
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001541 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001542
1543 if (!delayed_node)
1544 return -ENOENT;
1545
1546 /*
1547	 * Since we hold the i_mutex of this directory, it is impossible that
1548 * a new directory index is added into the delayed node and index_cnt
1549 * is updated now. So we needn't lock the delayed node.
1550 */
Miao Xie2f7e33d2011-06-23 07:27:13 +00001551 if (!delayed_node->index_cnt) {
1552 btrfs_release_delayed_node(delayed_node);
Miao Xie16cdcec2011-04-22 18:12:22 +08001553 return -EINVAL;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001554 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001555
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001556 inode->index_cnt = delayed_node->index_cnt;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001557 btrfs_release_delayed_node(delayed_node);
1558 return 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001559}
1560
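/*
 * Snapshot the pending insertions and deletions of a directory so readdir
 * can merge them with the items found in the btree.  Every item on the two
 * lists gets an extra reference that btrfs_readdir_put_delayed_items()
 * drops again.
 */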
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001561bool btrfs_readdir_get_delayed_items(struct inode *inode,
1562 struct list_head *ins_list,
1563 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001564{
1565 struct btrfs_delayed_node *delayed_node;
1566 struct btrfs_delayed_item *item;
1567
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001568 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001569 if (!delayed_node)
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001570 return false;
1571
1572 /*
1573 * We can only do one readdir with delayed items at a time because of
1574 * item->readdir_list.
1575 */
Josef Bacik64708532021-02-10 17:14:34 -05001576 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
1577 btrfs_inode_lock(inode, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001578
1579 mutex_lock(&delayed_node->mutex);
1580 item = __btrfs_first_delayed_insertion_item(delayed_node);
1581 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001582 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001583 list_add_tail(&item->readdir_list, ins_list);
1584 item = __btrfs_next_delayed_item(item);
1585 }
1586
1587 item = __btrfs_first_delayed_deletion_item(delayed_node);
1588 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001589 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001590 list_add_tail(&item->readdir_list, del_list);
1591 item = __btrfs_next_delayed_item(item);
1592 }
1593 mutex_unlock(&delayed_node->mutex);
1594 /*
1595 * This delayed node is still cached in the btrfs inode, so refs
1596	 * must be > 1 now, and we needn't check whether it is going to be
1597	 * freed or not.
1598	 *
1599	 * Besides that, this function is only used during readdir and we do not
1600 * insert/delete delayed items in this period. So we also needn't
1601 * requeue or dequeue this delayed node.
1602 */
Elena Reshetova6de5f182017-03-03 10:55:16 +02001603 refcount_dec(&delayed_node->refs);
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001604
1605 return true;
Miao Xie16cdcec2011-04-22 18:12:22 +08001606}
1607
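/*
 * Undo btrfs_readdir_get_delayed_items(): drop the references taken on
 * the listed items and free those whose refcount reaches zero.
 */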
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001608void btrfs_readdir_put_delayed_items(struct inode *inode,
1609 struct list_head *ins_list,
1610 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001611{
1612 struct btrfs_delayed_item *curr, *next;
1613
1614 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1615 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001616 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001617 kfree(curr);
1618 }
1619
1620 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1621 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001622 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001623 kfree(curr);
1624 }
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001625
1626 /*
1627 * The VFS is going to do up_read(), so we need to downgrade back to a
1628 * read lock.
1629 */
1630 downgrade_write(&inode->i_rwsem);
Miao Xie16cdcec2011-04-22 18:12:22 +08001631}
1632
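/*
 * Return 1 if @index is covered by a pending delayed deletion on
 * @del_list, in which case readdir must skip the matching btree entry.
 */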
1633int btrfs_should_delete_dir_index(struct list_head *del_list,
1634 u64 index)
1635{
Josef Bacike4fd4932018-01-23 15:17:05 -05001636 struct btrfs_delayed_item *curr;
1637 int ret = 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001638
Josef Bacike4fd4932018-01-23 15:17:05 -05001639 list_for_each_entry(curr, del_list, readdir_list) {
Miao Xie16cdcec2011-04-22 18:12:22 +08001640 if (curr->key.offset > index)
1641 break;
Josef Bacike4fd4932018-01-23 15:17:05 -05001642 if (curr->key.offset == index) {
1643 ret = 1;
1644 break;
1645 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001646 }
Josef Bacike4fd4932018-01-23 15:17:05 -05001647 return ret;
Miao Xie16cdcec2011-04-22 18:12:22 +08001648}
1649
1650/*
1651 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1652 * Emit the directory entries cached in @ins_list, skipping entries whose
1653 * index is below ctx->pos.
1653 */
Al Viro9cdda8d2013-05-22 16:48:09 -04001654int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
Jeff Mahoneyd2fbb2b2016-11-05 13:26:35 -04001655 struct list_head *ins_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001656{
1657 struct btrfs_dir_item *di;
1658 struct btrfs_delayed_item *curr, *next;
1659 struct btrfs_key location;
1660 char *name;
1661 int name_len;
1662 int over = 0;
1663 unsigned char d_type;
1664
1665 if (list_empty(ins_list))
1666 return 0;
1667
1668 /*
1669 * Changing the data of the delayed item is impossible. So
1670	 * we needn't lock them. And since we hold the i_mutex of the
1671	 * directory, nobody can delete any directory indexes now.
1672 */
1673 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1674 list_del(&curr->readdir_list);
1675
Al Viro9cdda8d2013-05-22 16:48:09 -04001676 if (curr->key.offset < ctx->pos) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001677 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001678 kfree(curr);
1679 continue;
1680 }
1681
Al Viro9cdda8d2013-05-22 16:48:09 -04001682 ctx->pos = curr->key.offset;
Miao Xie16cdcec2011-04-22 18:12:22 +08001683
1684 di = (struct btrfs_dir_item *)curr->data;
1685 name = (char *)(di + 1);
Qu Wenruo3cae2102013-07-16 11:19:18 +08001686 name_len = btrfs_stack_dir_name_len(di);
Miao Xie16cdcec2011-04-22 18:12:22 +08001687
Phillip Potter7d157c32019-03-26 21:39:34 +00001688 d_type = fs_ftype_to_dtype(di->type);
Miao Xie16cdcec2011-04-22 18:12:22 +08001689 btrfs_disk_key_to_cpu(&location, &di->location);
1690
Al Viro9cdda8d2013-05-22 16:48:09 -04001691 over = !dir_emit(ctx, name, name_len,
Miao Xie16cdcec2011-04-22 18:12:22 +08001692 location.objectid, d_type);
1693
Elena Reshetova089e77e2017-03-03 10:55:17 +02001694 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001695 kfree(curr);
1696
1697 if (over)
1698 return 1;
Josef Bacik42e9cc42017-07-24 15:14:26 -04001699 ctx->pos++;
Miao Xie16cdcec2011-04-22 18:12:22 +08001700 }
1701 return 0;
1702}
1703
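/*
 * Copy the current VFS inode state into a stack btrfs_inode_item so it can
 * later be written to the btree when the delayed node is flushed.
 */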
Miao Xie16cdcec2011-04-22 18:12:22 +08001704static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1705 struct btrfs_inode_item *inode_item,
1706 struct inode *inode)
1707{
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001708 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1709 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001710 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1711 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1712 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1713 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1714 btrfs_set_stack_inode_generation(inode_item,
1715 BTRFS_I(inode)->generation);
Jeff Laytonc7f88c42017-12-11 06:35:12 -05001716 btrfs_set_stack_inode_sequence(inode_item,
1717 inode_peek_iversion(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001718 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1719 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1720 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
Chris Masonff5714c2011-05-28 07:00:39 -04001721 btrfs_set_stack_inode_block_group(inode_item, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001722
David Sterbaa937b972014-12-12 17:39:12 +01001723 btrfs_set_stack_timespec_sec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001724 inode->i_atime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001725 btrfs_set_stack_timespec_nsec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001726 inode->i_atime.tv_nsec);
1727
David Sterbaa937b972014-12-12 17:39:12 +01001728 btrfs_set_stack_timespec_sec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001729 inode->i_mtime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001730 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001731 inode->i_mtime.tv_nsec);
1732
David Sterbaa937b972014-12-12 17:39:12 +01001733 btrfs_set_stack_timespec_sec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001734 inode->i_ctime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001735 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001736 inode->i_ctime.tv_nsec);
chandan r9cc97d62012-07-04 12:48:07 +05301737
1738 btrfs_set_stack_timespec_sec(&inode_item->otime,
1739 BTRFS_I(inode)->i_otime.tv_sec);
1740 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1741 BTRFS_I(inode)->i_otime.tv_nsec);
Miao Xie16cdcec2011-04-22 18:12:22 +08001742}
1743
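/*
 * Initialize a VFS inode from the inode item cached in its delayed node.
 * Returns -ENOENT when no dirty delayed inode item exists, and callers
 * fall back to reading the item from the btree.
 */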
Miao Xie2f7e33d2011-06-23 07:27:13 +00001744int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1745{
Josef Bacik9ddc9592020-01-17 09:02:22 -05001746 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001747 struct btrfs_delayed_node *delayed_node;
1748 struct btrfs_inode_item *inode_item;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001749
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001750 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001751 if (!delayed_node)
1752 return -ENOENT;
1753
1754 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001755 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Miao Xie2f7e33d2011-06-23 07:27:13 +00001756 mutex_unlock(&delayed_node->mutex);
1757 btrfs_release_delayed_node(delayed_node);
1758 return -ENOENT;
1759 }
1760
1761 inode_item = &delayed_node->inode_item;
1762
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001763 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1764 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
Nikolay Borisov6ef06d22017-02-20 13:50:34 +02001765 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
Josef Bacik9ddc9592020-01-17 09:02:22 -05001766 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1767 round_up(i_size_read(inode), fs_info->sectorsize));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001768 inode->i_mode = btrfs_stack_inode_mode(inode_item);
Miklos Szeredibfe86842011-10-28 14:13:29 +02001769 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001770 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1771 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
Yang Dongsheng6e17d302015-04-09 12:08:43 +08001772 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1773
Jeff Laytonc7f88c42017-12-11 06:35:12 -05001774 inode_set_iversion_queried(inode,
1775 btrfs_stack_inode_sequence(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001776 inode->i_rdev = 0;
1777 *rdev = btrfs_stack_inode_rdev(inode_item);
1778 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1779
David Sterbaa937b972014-12-12 17:39:12 +01001780 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1781 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001782
David Sterbaa937b972014-12-12 17:39:12 +01001783 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1784 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001785
David Sterbaa937b972014-12-12 17:39:12 +01001786 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1787 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001788
chandan r9cc97d62012-07-04 12:48:07 +05301789 BTRFS_I(inode)->i_otime.tv_sec =
1790 btrfs_stack_timespec_sec(&inode_item->otime);
1791 BTRFS_I(inode)->i_otime.tv_nsec =
1792 btrfs_stack_timespec_nsec(&inode_item->otime);
1793
Miao Xie2f7e33d2011-06-23 07:27:13 +00001794 inode->i_generation = BTRFS_I(inode)->generation;
1795 BTRFS_I(inode)->index_cnt = (u64)-1;
1796
1797 mutex_unlock(&delayed_node->mutex);
1798 btrfs_release_delayed_node(delayed_node);
1799 return 0;
1800}
1801
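/*
 * Record an inode update in the delayed node instead of updating the inode
 * item in the btree right away.  Metadata space is only reserved the first
 * time the node's inode item is marked dirty.
 */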
Miao Xie16cdcec2011-04-22 18:12:22 +08001802int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
Nikolay Borisovf3fbcae2020-11-02 16:48:57 +02001803 struct btrfs_root *root,
1804 struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001805{
1806 struct btrfs_delayed_node *delayed_node;
David Sterbaaa0467d2011-06-03 16:29:08 +02001807 int ret = 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001808
Nikolay Borisovf3fbcae2020-11-02 16:48:57 +02001809 delayed_node = btrfs_get_or_create_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001810 if (IS_ERR(delayed_node))
1811 return PTR_ERR(delayed_node);
1812
1813 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001814 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Nikolay Borisovf3fbcae2020-11-02 16:48:57 +02001815 fill_stack_inode_item(trans, &delayed_node->inode_item,
1816 &inode->vfs_inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001817 goto release_node;
1818 }
1819
Nikolay Borisov8e3c9d32021-02-22 18:40:46 +02001820 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
Josef Bacikc06a0e12011-11-04 19:56:02 -04001821 if (ret)
1822 goto release_node;
Miao Xie16cdcec2011-04-22 18:12:22 +08001823
Nikolay Borisovf3fbcae2020-11-02 16:48:57 +02001824 fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
Miao Xie7cf35d92013-12-26 13:07:05 +08001825 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
Miao Xie16cdcec2011-04-22 18:12:22 +08001826 delayed_node->count++;
1827 atomic_inc(&root->fs_info->delayed_root->items);
1828release_node:
1829 mutex_unlock(&delayed_node->mutex);
1830 btrfs_release_delayed_node(delayed_node);
1831 return ret;
1832}
1833
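/*
 * Queue an asynchronous deletion of the inode's (single) inode ref.  No
 * extra metadata space is reserved for it; see the comment below for why
 * that is safe.  Returns -EAGAIN during log recovery.
 */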
Nikolay Borisove07222c2017-01-10 20:35:37 +02001834int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
Miao Xie67de1172013-12-26 13:07:06 +08001835{
David Sterba3ffbd682018-06-29 10:56:42 +02001836 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Miao Xie67de1172013-12-26 13:07:06 +08001837 struct btrfs_delayed_node *delayed_node;
1838
Chris Mason6f896052014-12-31 12:18:29 -05001839 /*
1840	 * We don't do delayed inode updates during log recovery because it
1841	 * leads to ENOSPC problems. This means we also can't do
1842	 * delayed inode refs.
1843 */
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001844 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
Chris Mason6f896052014-12-31 12:18:29 -05001845 return -EAGAIN;
1846
Nikolay Borisove07222c2017-01-10 20:35:37 +02001847 delayed_node = btrfs_get_or_create_delayed_node(inode);
Miao Xie67de1172013-12-26 13:07:06 +08001848 if (IS_ERR(delayed_node))
1849 return PTR_ERR(delayed_node);
1850
1851 /*
1852	 * We don't reserve space for the inode ref deletion because:
1853	 * - We ONLY do async inode ref deletion for an inode that has only
1854	 *   one link (i_nlink == 1), which means there is only one inode ref.
1855	 *   In most cases, the inode ref and the inode item are in the same
1856	 *   leaf, and we will deal with them at the same time.
1857	 *   Since we are sure we will reserve space for the inode item, it is
1858	 *   unnecessary to reserve space for the inode ref deletion.
1859	 * - If the inode ref and the inode item are not in the same leaf,
1860	 *   we also needn't worry about ENOSPC, because we reserve much more
1861	 *   space for the inode update than it needs.
1862	 * - At worst, we can steal some space from the global reservation.
1863	 *   That is very rare.
1864 */
1865 mutex_lock(&delayed_node->mutex);
1866 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1867 goto release_node;
1868
1869 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1870 delayed_node->count++;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001871 atomic_inc(&fs_info->delayed_root->items);
Miao Xie67de1172013-12-26 13:07:06 +08001872release_node:
1873 mutex_unlock(&delayed_node->mutex);
1874 btrfs_release_delayed_node(delayed_node);
1875 return 0;
1876}
1877
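/*
 * Throw away everything a delayed node holds: pending insertion and
 * deletion items, a queued inode ref deletion and a dirty inode item,
 * releasing their metadata reservations along the way.
 */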
Miao Xie16cdcec2011-04-22 18:12:22 +08001878static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1879{
1880 struct btrfs_root *root = delayed_node->root;
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001881 struct btrfs_fs_info *fs_info = root->fs_info;
Miao Xie16cdcec2011-04-22 18:12:22 +08001882 struct btrfs_delayed_item *curr_item, *prev_item;
1883
1884 mutex_lock(&delayed_node->mutex);
1885 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1886 while (curr_item) {
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001887 btrfs_delayed_item_release_metadata(root, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001888 prev_item = curr_item;
1889 curr_item = __btrfs_next_delayed_item(prev_item);
1890 btrfs_release_delayed_item(prev_item);
1891 }
1892
1893 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1894 while (curr_item) {
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001895 btrfs_delayed_item_release_metadata(root, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001896 prev_item = curr_item;
1897 curr_item = __btrfs_next_delayed_item(prev_item);
1898 btrfs_release_delayed_item(prev_item);
1899 }
1900
Miao Xie67de1172013-12-26 13:07:06 +08001901 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1902 btrfs_release_delayed_iref(delayed_node);
1903
Miao Xie7cf35d92013-12-26 13:07:05 +08001904 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001905 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
Miao Xie16cdcec2011-04-22 18:12:22 +08001906 btrfs_release_delayed_inode(delayed_node);
1907 }
1908 mutex_unlock(&delayed_node->mutex);
1909}
1910
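/*
 * Drop all delayed items of a single inode, used when its pending changes
 * should never reach the btree.
 */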
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001911void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001912{
1913 struct btrfs_delayed_node *delayed_node;
1914
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001915 delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001916 if (!delayed_node)
1917 return;
1918
1919 __btrfs_kill_delayed_node(delayed_node);
1920 btrfs_release_delayed_node(delayed_node);
1921}
1922
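/*
 * Walk the root's radix tree of delayed nodes in batches and kill every
 * node found, skipping nodes whose refcount already dropped to zero since
 * those are about to be removed from the tree anyway.
 */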
1923void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1924{
1925 u64 inode_id = 0;
1926 struct btrfs_delayed_node *delayed_nodes[8];
1927 int i, n;
1928
1929 while (1) {
1930 spin_lock(&root->inode_lock);
1931 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1932 (void **)delayed_nodes, inode_id,
1933 ARRAY_SIZE(delayed_nodes));
1934 if (!n) {
1935 spin_unlock(&root->inode_lock);
1936 break;
1937 }
1938
1939 inode_id = delayed_nodes[n - 1]->inode_id + 1;
Josef Bacikbaf320b2019-09-26 08:29:32 -04001940 for (i = 0; i < n; i++) {
1941 /*
1942 * Don't increase refs in case the node is dead and
1943 * about to be removed from the tree in the loop below
1944 */
1945 if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
1946 delayed_nodes[i] = NULL;
1947 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001948 spin_unlock(&root->inode_lock);
1949
1950 for (i = 0; i < n; i++) {
Josef Bacikbaf320b2019-09-26 08:29:32 -04001951 if (!delayed_nodes[i])
1952 continue;
Miao Xie16cdcec2011-04-22 18:12:22 +08001953 __btrfs_kill_delayed_node(delayed_nodes[i]);
1954 btrfs_release_delayed_node(delayed_nodes[i]);
1955 }
1956 }
1957}
Miao Xie67cde342012-06-14 02:23:22 -06001958
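/*
 * Kill every delayed node still queued on the fs_info's delayed root,
 * walking the node list from front to back.
 */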
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001959void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
Miao Xie67cde342012-06-14 02:23:22 -06001960{
Miao Xie67cde342012-06-14 02:23:22 -06001961 struct btrfs_delayed_node *curr_node, *prev_node;
1962
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001963 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
Miao Xie67cde342012-06-14 02:23:22 -06001964 while (curr_node) {
1965 __btrfs_kill_delayed_node(curr_node);
1966
1967 prev_node = curr_node;
1968 curr_node = btrfs_next_delayed_node(curr_node);
1969 btrfs_release_delayed_node(prev_node);
1970 }
1971}
1972