// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 Fujitsu. All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 */

#include <linux/slab.h>
#include <linux/iversion.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#include "qgroup.h"
#include "locking.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT_CACHED;
	delayed_node->del_root = RB_ROOT_CACHED;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

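/*
 * Two delayed items are continuous when they are dir index items for the
 * same inode and their index offsets are consecutive; such runs can be
 * inserted into or deleted from a leaf in a single batch.
 */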
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree. In this case, the refcount
		 * was zero and it should never go back to one. Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero. If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}

/*
 * Call this while holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call this while holding delayed_node->mutex. */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

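/*
 * Grab a reference to the first delayed node on the global node list, or
 * return NULL if the list is empty.
 */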
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

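/*
 * Drop one reference on a delayed node. The node is requeued if it still has
 * pending items, otherwise it is dequeued; when the last reference goes away
 * the node is also removed from the root's radix tree and freed.
 */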
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up. We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

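/*
 * Allocate a delayed item with room for data_len bytes of payload appended
 * after the struct (reached via item->data); the callers fill that space
 * before the item is queued.
 */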
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:	the rbtree root (insertion or deletion tree) to search
 * @key:	the key to look up
 * @prev:	used to store the prev item if the right item isn't found
 * @next:	used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root.rb_root, key,
					   NULL, NULL);
}

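/*
 * Link a delayed item into either the insertion or the deletion rbtree of
 * its delayed node, keyed by the btrfs key. For dir index insertions this
 * also pushes index_cnt past the new index.
 */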
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root_cached *root;
	struct btrfs_delayed_item *item;
	int cmp;
	bool leftmost = true;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_root.rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else if (cmp > 0) {
			p = &(*p)->rb_left;
		} else {
			return -EEXIST;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

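/*
 * Account one finished delayed item and wake up anyone throttled in
 * btrfs_balance_delayed_items(), either because the backlog dropped below
 * BTRFS_DELAYED_BACKGROUND or because another batch of BTRFS_DELAYED_BATCH
 * items has completed.
 */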
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/* atomic_dec_return implies a barrier */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
		cond_wake_up_nomb(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root_cached *root;
	struct btrfs_delayed_root *delayed_root;

	/* Not associated with any delayed_node */
	if (!delayed_item->delayed_node)
		return;
	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase_cached(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first_cached(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

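/*
 * Reserve metadata space for one delayed item by migrating it from the
 * transaction's block reservation (which already paid for this update when
 * the transaction started) into the global delayed_block_rsv.
 */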
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);

	/*
	 * Here we migrate the space rsv from the transaction rsv, since we
	 * already reserved space when starting the transaction. So there is
	 * no need to reserve qgroup space here.
	 */
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	/*
	 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
	 * to release/reserve qgroup space.
	 */
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_metadata_size(fs_info, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which, for speed, doesn't reserve space. This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space here.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we always reserve enough to update the inode item.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_qgroup_reserve_meta(root, num_bytes,
					  BTRFS_QGROUP_RSV_META_PREALLOC, true);
		if (ret < 0)
			return ret;
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN) {
			ret = -ENOSPC;
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		} else {
			btrfs_qgroup_free_meta_prealloc(root, num_bytes);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	return ret;
}

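/*
 * Give back the metadata reservation held for a delayed inode update. The
 * qgroup prealloc reservation is either freed (qgroup_free, the update was
 * abandoned) or converted (the update made it into the tree).
 */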
static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_node *node,
						bool qgroup_free)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
	if (qgroup_free)
		btrfs_qgroup_free_meta_prealloc(node->root,
				node->bytes_reserved);
	else
		btrfs_qgroup_convert_reserved_meta(node->root,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * Count the number of continuous items that we can insert in a batch.
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * We need to allocate some memory, which might cause the task to
	 * sleep, so set all locked nodes in the path to blocking locks first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}

/*
 * This helper handles simple insertions that don't need to extend an item
 * for new data, such as directory name index and inode insertions.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	char *ptr;
	int ret;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * We insert an item first, and then, if there are continuous items after it,
 * we try to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

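/*
 * Delete a run of continuous dir index items from the leaf in one
 * btrfs_del_items() call, starting at the slot that path points to, then
 * release the corresponding delayed items and their metadata reservations.
 */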
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * Count the number of dir index items that we can delete in a batch.
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	unsigned int nofs_flag;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * We can't find the item that the node points to, so this
		 * node is invalid; just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	unsigned int nofs_flag;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	memalloc_nofs_restore(nofs_flag);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for an inode that has only one link, so
	 * there is only one iref. The case where several irefs share the
	 * same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;

	nofs_flag = memalloc_nofs_save();
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	memalloc_nofs_restore(nofs_flag);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

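/*
 * Flush everything a delayed node holds, in order: pending insertions,
 * pending deletions, and finally the delayed inode item update itself.
 */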
static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (TRANS_ABORTED(trans))
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
{
	return __btrfs_run_delayed_items(trans, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
{
	return __btrfs_run_delayed_items(trans, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

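/*
 * Worker body for asynchronous flushing: pull prepared delayed nodes off the
 * prepare list and commit them, one joined transaction per node, until the
 * backlog drops below half of BTRFS_DELAYED_BACKGROUND (or, for a bounded
 * request, until async_work->nr nodes have been processed).
 */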
Qu Wenruod458b052014-02-28 10:46:19 +08001310static void btrfs_async_run_delayed_root(struct btrfs_work *work)
Miao Xie16cdcec2011-04-22 18:12:22 +08001311{
Chris Masonde3cb942013-03-04 17:13:31 -05001312 struct btrfs_async_delayed_work *async_work;
1313 struct btrfs_delayed_root *delayed_root;
Miao Xie16cdcec2011-04-22 18:12:22 +08001314 struct btrfs_trans_handle *trans;
1315 struct btrfs_path *path;
1316 struct btrfs_delayed_node *delayed_node = NULL;
1317 struct btrfs_root *root;
Miao Xie19fd2942011-06-15 10:47:30 +00001318 struct btrfs_block_rsv *block_rsv;
Chris Masonde3cb942013-03-04 17:13:31 -05001319 int total_done = 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001320
Chris Masonde3cb942013-03-04 17:13:31 -05001321 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1322 delayed_root = async_work->delayed_root;
Miao Xie16cdcec2011-04-22 18:12:22 +08001323
1324 path = btrfs_alloc_path();
1325 if (!path)
1326 goto out;
Miao Xie16cdcec2011-04-22 18:12:22 +08001327
Nikolay Borisov617c54a2017-10-23 13:51:48 +03001328 do {
1329 if (atomic_read(&delayed_root->items) <
1330 BTRFS_DELAYED_BACKGROUND / 2)
1331 break;
Chris Masonde3cb942013-03-04 17:13:31 -05001332
Nikolay Borisov617c54a2017-10-23 13:51:48 +03001333 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1334 if (!delayed_node)
1335 break;
Chris Masonde3cb942013-03-04 17:13:31 -05001336
Nikolay Borisov617c54a2017-10-23 13:51:48 +03001337 path->leave_spinning = 1;
1338 root = delayed_node->root;
Miao Xie16cdcec2011-04-22 18:12:22 +08001339
Nikolay Borisov617c54a2017-10-23 13:51:48 +03001340 trans = btrfs_join_transaction(root);
1341 if (IS_ERR(trans)) {
1342 btrfs_release_path(path);
1343 btrfs_release_prepared_delayed_node(delayed_node);
1344 total_done++;
1345 continue;
1346 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001347
Nikolay Borisov617c54a2017-10-23 13:51:48 +03001348 block_rsv = trans->block_rsv;
1349 trans->block_rsv = &root->fs_info->delayed_block_rsv;
Miao Xie19fd2942011-06-15 10:47:30 +00001350
Nikolay Borisov617c54a2017-10-23 13:51:48 +03001351 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
Miao Xie16cdcec2011-04-22 18:12:22 +08001352
Nikolay Borisov617c54a2017-10-23 13:51:48 +03001353 trans->block_rsv = block_rsv;
1354 btrfs_end_transaction(trans);
1355 btrfs_btree_balance_dirty_nodelay(root->fs_info);
Chris Masonde3cb942013-03-04 17:13:31 -05001356
Nikolay Borisov617c54a2017-10-23 13:51:48 +03001357 btrfs_release_path(path);
1358 btrfs_release_prepared_delayed_node(delayed_node);
1359 total_done++;
Chris Masonde3cb942013-03-04 17:13:31 -05001360
Nikolay Borisov617c54a2017-10-23 13:51:48 +03001361 } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1362 || total_done < async_work->nr);
Chris Masonde3cb942013-03-04 17:13:31 -05001363
Miao Xie16cdcec2011-04-22 18:12:22 +08001364 btrfs_free_path(path);
1365out:
Chris Masonde3cb942013-03-04 17:13:31 -05001366 wake_up(&delayed_root->wait);
1367 kfree(async_work);
Miao Xie16cdcec2011-04-22 18:12:22 +08001368}
1369
Miao Xie16cdcec2011-04-22 18:12:22 +08001370
Chris Masonde3cb942013-03-04 17:13:31 -05001371static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
Daniel Dresslera585e942014-11-17 22:05:02 +09001372 struct btrfs_fs_info *fs_info, int nr)
Chris Masonde3cb942013-03-04 17:13:31 -05001373{
1374 struct btrfs_async_delayed_work *async_work;
1375
Chris Masonde3cb942013-03-04 17:13:31 -05001376 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1377 if (!async_work)
Miao Xie16cdcec2011-04-22 18:12:22 +08001378 return -ENOMEM;
Miao Xie16cdcec2011-04-22 18:12:22 +08001379
Chris Masonde3cb942013-03-04 17:13:31 -05001380 async_work->delayed_root = delayed_root;
Omar Sandovala0cac0e2019-09-16 11:30:57 -07001381 btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
1382 NULL);
Chris Masonde3cb942013-03-04 17:13:31 -05001383 async_work->nr = nr;
Miao Xie16cdcec2011-04-22 18:12:22 +08001384
Daniel Dresslera585e942014-11-17 22:05:02 +09001385 btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
Miao Xie16cdcec2011-04-22 18:12:22 +08001386 return 0;
1387}
1388
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001389void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
Chris Masone9993762011-06-17 16:14:09 -04001390{
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001391 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
Chris Masone9993762011-06-17 16:14:09 -04001392}
1393
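/*
 * A waiter may stop waiting once a full batch (BTRFS_DELAYED_BATCH) of
 * items has been processed since it sampled items_seq (val < seq also
 * catches a wrapped counter), or once the backlog has dropped below
 * BTRFS_DELAYED_BACKGROUND items.
 */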
Miao Xie03538082013-12-26 13:07:03 +08001394static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
Chris Masonde3cb942013-03-04 17:13:31 -05001395{
1396 int val = atomic_read(&delayed_root->items_seq);
1397
Miao Xie03538082013-12-26 13:07:03 +08001398 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
Chris Masonde3cb942013-03-04 17:13:31 -05001399 return 1;
Miao Xie03538082013-12-26 13:07:03 +08001400
1401 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1402 return 1;
1403
Chris Masonde3cb942013-03-04 17:13:31 -05001404 return 0;
1405}
1406
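/*
 * Throttle callers that generate delayed items: do nothing while fewer
 * than BTRFS_DELAYED_BACKGROUND items are pending, kick async flushing
 * above that, and from BTRFS_DELAYED_WRITEBACK on also block until
 * could_end_wait() says enough progress has been made.
 */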
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001407void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
Miao Xie16cdcec2011-04-22 18:12:22 +08001408{
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001409 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
Miao Xie16cdcec2011-04-22 18:12:22 +08001410
Nikolay Borisov85777872017-10-23 13:51:49 +03001411 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1412 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
Miao Xie16cdcec2011-04-22 18:12:22 +08001413 return;
1414
1415 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
Miao Xie03538082013-12-26 13:07:03 +08001416 int seq;
Miao Xie16cdcec2011-04-22 18:12:22 +08001417 int ret;
Miao Xie03538082013-12-26 13:07:03 +08001418
1419 seq = atomic_read(&delayed_root->items_seq);
Chris Masonde3cb942013-03-04 17:13:31 -05001420
Daniel Dresslera585e942014-11-17 22:05:02 +09001421 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001422 if (ret)
1423 return;
1424
Miao Xie03538082013-12-26 13:07:03 +08001425 wait_event_interruptible(delayed_root->wait,
1426 could_end_wait(delayed_root, seq));
Miao Xie4dd466d2013-12-26 13:07:02 +08001427 return;
Miao Xie16cdcec2011-04-22 18:12:22 +08001428 }
1429
Daniel Dresslera585e942014-11-17 22:05:02 +09001430 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
Miao Xie16cdcec2011-04-22 18:12:22 +08001431}
1432
Jeff Mahoney79787ea2012-03-12 16:03:00 +01001433/* Will return 0 or -ENOMEM */
Miao Xie16cdcec2011-04-22 18:12:22 +08001434int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001435 const char *name, int name_len,
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001436 struct btrfs_inode *dir,
Miao Xie16cdcec2011-04-22 18:12:22 +08001437 struct btrfs_disk_key *disk_key, u8 type,
1438 u64 index)
1439{
1440 struct btrfs_delayed_node *delayed_node;
1441 struct btrfs_delayed_item *delayed_item;
1442 struct btrfs_dir_item *dir_item;
1443 int ret;
1444
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001445 delayed_node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001446 if (IS_ERR(delayed_node))
1447 return PTR_ERR(delayed_node);
1448
1449 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
1450 if (!delayed_item) {
1451 ret = -ENOMEM;
1452 goto release_node;
1453 }
1454
Nikolay Borisov6f45d182017-01-10 20:35:35 +02001455 delayed_item->key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001456 delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001457 delayed_item->key.offset = index;
1458
1459 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1460 dir_item->location = *disk_key;
Qu Wenruo3cae2102013-07-16 11:19:18 +08001461 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1462 btrfs_set_stack_dir_data_len(dir_item, 0);
1463 btrfs_set_stack_dir_name_len(dir_item, name_len);
1464 btrfs_set_stack_dir_type(dir_item, type);
Miao Xie16cdcec2011-04-22 18:12:22 +08001465 memcpy((char *)(dir_item + 1), name, name_len);
1466
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001467 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, delayed_item);
Josef Bacik8c2a3ca2012-01-10 10:31:31 -05001468 /*
1469	 * We reserved enough space when we started the transaction, so a
1470	 * metadata reservation failure here is impossible.
1471 */
1472 BUG_ON(ret);
1473
Miao Xie16cdcec2011-04-22 18:12:22 +08001474 mutex_lock(&delayed_node->mutex);
1475 ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
1476 if (unlikely(ret)) {
Lu Fengqi4465c8b2018-08-01 11:32:25 +08001477 btrfs_err(trans->fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001478		  "error adding delayed dir index item (name: %.*s) into the insertion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09001479 name_len, name, delayed_node->root->root_key.objectid,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001480 delayed_node->inode_id, ret);
Miao Xie16cdcec2011-04-22 18:12:22 +08001481 BUG();
1482 }
1483 mutex_unlock(&delayed_node->mutex);
1484
1485release_node:
1486 btrfs_release_delayed_node(delayed_node);
1487 return ret;
1488}
1489
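/*
 * If the index being deleted still has a pending delayed insertion
 * item, the two cancel out: drop the insertion item together with its
 * metadata reservation instead of queueing a deletion item. Returns 0
 * if such an item was found and released, 1 otherwise.
 */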
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001490static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
Miao Xie16cdcec2011-04-22 18:12:22 +08001491 struct btrfs_delayed_node *node,
1492 struct btrfs_key *key)
1493{
1494 struct btrfs_delayed_item *item;
1495
1496 mutex_lock(&node->mutex);
1497 item = __btrfs_lookup_delayed_insertion_item(node, key);
1498 if (!item) {
1499 mutex_unlock(&node->mutex);
1500 return 1;
1501 }
1502
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001503 btrfs_delayed_item_release_metadata(node->root, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001504 btrfs_release_delayed_item(item);
1505 mutex_unlock(&node->mutex);
1506 return 0;
1507}
1508
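/*
 * Delete a dir index item through the delayed node: first try to
 * cancel a not-yet-flushed insertion with the same key; failing that,
 * queue a deletion item to be applied when the node is committed.
 */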
1509int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001510 struct btrfs_inode *dir, u64 index)
Miao Xie16cdcec2011-04-22 18:12:22 +08001511{
1512 struct btrfs_delayed_node *node;
1513 struct btrfs_delayed_item *item;
1514 struct btrfs_key item_key;
1515 int ret;
1516
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001517 node = btrfs_get_or_create_delayed_node(dir);
Miao Xie16cdcec2011-04-22 18:12:22 +08001518 if (IS_ERR(node))
1519 return PTR_ERR(node);
1520
Nikolay Borisove67bbbb2017-01-10 20:35:36 +02001521 item_key.objectid = btrfs_ino(dir);
David Sterba962a2982014-06-04 18:41:45 +02001522 item_key.type = BTRFS_DIR_INDEX_KEY;
Miao Xie16cdcec2011-04-22 18:12:22 +08001523 item_key.offset = index;
1524
Lu Fengqi9add2942018-08-01 11:32:26 +08001525 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node,
1526 &item_key);
Miao Xie16cdcec2011-04-22 18:12:22 +08001527 if (!ret)
1528 goto end;
1529
1530 item = btrfs_alloc_delayed_item(0);
1531 if (!item) {
1532 ret = -ENOMEM;
1533 goto end;
1534 }
1535
1536 item->key = item_key;
1537
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001538 ret = btrfs_delayed_item_reserve_metadata(trans, dir->root, item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001539 /*
1540	 * We reserved enough space when we started the transaction, so a
1541	 * metadata reservation failure here is impossible.
1542 */
Qu Wenruo933c22a2019-07-16 17:00:32 +08001543 if (ret < 0) {
1544 btrfs_err(trans->fs_info,
1545"metadata reservation failed for delayed dir item deltiona, should have been reserved");
1546 btrfs_release_delayed_item(item);
1547 goto end;
1548 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001549
1550 mutex_lock(&node->mutex);
1551 ret = __btrfs_add_delayed_deletion_item(node, item);
1552 if (unlikely(ret)) {
Lu Fengqi9add2942018-08-01 11:32:26 +08001553 btrfs_err(trans->fs_info,
Jeff Mahoney5d163e02016-09-20 10:05:00 -04001554		  "error adding delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09001555 index, node->root->root_key.objectid,
1556 node->inode_id, ret);
Qu Wenruo933c22a2019-07-16 17:00:32 +08001557 btrfs_delayed_item_release_metadata(dir->root, item);
1558 btrfs_release_delayed_item(item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001559 }
1560 mutex_unlock(&node->mutex);
1561end:
1562 btrfs_release_delayed_node(node);
1563 return ret;
1564}
1565
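/*
 * Copy the cached directory index count from the delayed node into the
 * in-memory inode. Returns -ENOENT if the inode has no delayed node
 * and -EINVAL if the node does not carry an index count yet.
 */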
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001566int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001567{
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001568 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001569
1570 if (!delayed_node)
1571 return -ENOENT;
1572
1573 /*
1574	 * Since we hold the i_mutex of this directory, no new directory
1575	 * index can be added to the delayed node and index_cnt cannot be
1576	 * updated now, so we needn't lock the delayed node.
1577 */
Miao Xie2f7e33d2011-06-23 07:27:13 +00001578 if (!delayed_node->index_cnt) {
1579 btrfs_release_delayed_node(delayed_node);
Miao Xie16cdcec2011-04-22 18:12:22 +08001580 return -EINVAL;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001581 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001582
Nikolay Borisovf5cc7b82017-01-10 20:35:42 +02001583 inode->index_cnt = delayed_node->index_cnt;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001584 btrfs_release_delayed_node(delayed_node);
1585 return 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001586}
1587
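/*
 * Snapshot the pending insertion and deletion items for readdir: each
 * item gets an extra reference and is linked onto the caller's lists.
 * The caller must release them via btrfs_readdir_put_delayed_items().
 */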
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001588bool btrfs_readdir_get_delayed_items(struct inode *inode,
1589 struct list_head *ins_list,
1590 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001591{
1592 struct btrfs_delayed_node *delayed_node;
1593 struct btrfs_delayed_item *item;
1594
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001595 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001596 if (!delayed_node)
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001597 return false;
1598
1599 /*
1600 * We can only do one readdir with delayed items at a time because of
1601 * item->readdir_list.
1602 */
1603 inode_unlock_shared(inode);
1604 inode_lock(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001605
1606 mutex_lock(&delayed_node->mutex);
1607 item = __btrfs_first_delayed_insertion_item(delayed_node);
1608 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001609 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001610 list_add_tail(&item->readdir_list, ins_list);
1611 item = __btrfs_next_delayed_item(item);
1612 }
1613
1614 item = __btrfs_first_delayed_deletion_item(delayed_node);
1615 while (item) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001616 refcount_inc(&item->refs);
Miao Xie16cdcec2011-04-22 18:12:22 +08001617 list_add_tail(&item->readdir_list, del_list);
1618 item = __btrfs_next_delayed_item(item);
1619 }
1620 mutex_unlock(&delayed_node->mutex);
1621 /*
1622	 * This delayed node is still cached in the btrfs inode, so refs
1623	 * must be > 1 now, and we needn't check whether it is going to be
1624	 * freed.
1625	 *
1626	 * Besides that, this function is used to read the directory, and we
1627	 * do not insert/delete delayed items during this period, so we also
1628	 * needn't requeue or dequeue this delayed node.
1629 */
Elena Reshetova6de5f182017-03-03 10:55:16 +02001630 refcount_dec(&delayed_node->refs);
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001631
1632 return true;
Miao Xie16cdcec2011-04-22 18:12:22 +08001633}
1634
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001635void btrfs_readdir_put_delayed_items(struct inode *inode,
1636 struct list_head *ins_list,
1637 struct list_head *del_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001638{
1639 struct btrfs_delayed_item *curr, *next;
1640
1641 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1642 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001643 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001644 kfree(curr);
1645 }
1646
1647 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1648 list_del(&curr->readdir_list);
Elena Reshetova089e77e2017-03-03 10:55:17 +02001649 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001650 kfree(curr);
1651 }
Omar Sandoval02dbfc92016-05-20 13:50:33 -07001652
1653 /*
1654 * The VFS is going to do up_read(), so we need to downgrade back to a
1655 * read lock.
1656 */
1657 downgrade_write(&inode->i_rwsem);
Miao Xie16cdcec2011-04-22 18:12:22 +08001658}
1659
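/*
 * During readdir, report whether @index is covered by a pending
 * delayed deletion item. del_list is sorted by key offset (it was
 * built by an in-order tree walk), so the scan can stop early.
 */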
1660int btrfs_should_delete_dir_index(struct list_head *del_list,
1661 u64 index)
1662{
Josef Bacike4fd4932018-01-23 15:17:05 -05001663 struct btrfs_delayed_item *curr;
1664 int ret = 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001665
Josef Bacike4fd4932018-01-23 15:17:05 -05001666 list_for_each_entry(curr, del_list, readdir_list) {
Miao Xie16cdcec2011-04-22 18:12:22 +08001667 if (curr->key.offset > index)
1668 break;
Josef Bacike4fd4932018-01-23 15:17:05 -05001669 if (curr->key.offset == index) {
1670 ret = 1;
1671 break;
1672 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001673 }
Josef Bacike4fd4932018-01-23 15:17:05 -05001674 return ret;
Miao Xie16cdcec2011-04-22 18:12:22 +08001675}
1676
1677/*
1678 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1679 */
Al Viro9cdda8d2013-05-22 16:48:09 -04001681int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
Jeff Mahoneyd2fbb2b2016-11-05 13:26:35 -04001682 struct list_head *ins_list)
Miao Xie16cdcec2011-04-22 18:12:22 +08001683{
1684 struct btrfs_dir_item *di;
1685 struct btrfs_delayed_item *curr, *next;
1686 struct btrfs_key location;
1687 char *name;
1688 int name_len;
1689 int over = 0;
1690 unsigned char d_type;
1691
1692 if (list_empty(ins_list))
1693 return 0;
1694
1695 /*
1696	 * The data of the delayed items cannot change, so we needn't lock
1697	 * them. And since we hold the i_mutex of the directory, nobody can
1698	 * delete any directory index now.
1699 */
1700 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1701 list_del(&curr->readdir_list);
1702
Al Viro9cdda8d2013-05-22 16:48:09 -04001703 if (curr->key.offset < ctx->pos) {
Elena Reshetova089e77e2017-03-03 10:55:17 +02001704 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001705 kfree(curr);
1706 continue;
1707 }
1708
Al Viro9cdda8d2013-05-22 16:48:09 -04001709 ctx->pos = curr->key.offset;
Miao Xie16cdcec2011-04-22 18:12:22 +08001710
1711 di = (struct btrfs_dir_item *)curr->data;
1712 name = (char *)(di + 1);
Qu Wenruo3cae2102013-07-16 11:19:18 +08001713 name_len = btrfs_stack_dir_name_len(di);
Miao Xie16cdcec2011-04-22 18:12:22 +08001714
Phillip Potter7d157c32019-03-26 21:39:34 +00001715 d_type = fs_ftype_to_dtype(di->type);
Miao Xie16cdcec2011-04-22 18:12:22 +08001716 btrfs_disk_key_to_cpu(&location, &di->location);
1717
Al Viro9cdda8d2013-05-22 16:48:09 -04001718 over = !dir_emit(ctx, name, name_len,
Miao Xie16cdcec2011-04-22 18:12:22 +08001719 location.objectid, d_type);
1720
Elena Reshetova089e77e2017-03-03 10:55:17 +02001721 if (refcount_dec_and_test(&curr->refs))
Miao Xie16cdcec2011-04-22 18:12:22 +08001722 kfree(curr);
1723
1724 if (over)
1725 return 1;
Josef Bacik42e9cc42017-07-24 15:14:26 -04001726 ctx->pos++;
Miao Xie16cdcec2011-04-22 18:12:22 +08001727 }
1728 return 0;
1729}
1730
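/*
 * Copy the current VFS inode state into the stack inode item embedded
 * in the delayed node; this is what later gets written into the fs
 * tree leaf when the delayed node is committed.
 */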
Miao Xie16cdcec2011-04-22 18:12:22 +08001731static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1732 struct btrfs_inode_item *inode_item,
1733 struct inode *inode)
1734{
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001735 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1736 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001737 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1738 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1739 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1740 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1741 btrfs_set_stack_inode_generation(inode_item,
1742 BTRFS_I(inode)->generation);
Jeff Laytonc7f88c42017-12-11 06:35:12 -05001743 btrfs_set_stack_inode_sequence(inode_item,
1744 inode_peek_iversion(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001745 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1746 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1747 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
Chris Masonff5714c2011-05-28 07:00:39 -04001748 btrfs_set_stack_inode_block_group(inode_item, 0);
Miao Xie16cdcec2011-04-22 18:12:22 +08001749
David Sterbaa937b972014-12-12 17:39:12 +01001750 btrfs_set_stack_timespec_sec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001751 inode->i_atime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001752 btrfs_set_stack_timespec_nsec(&inode_item->atime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001753 inode->i_atime.tv_nsec);
1754
David Sterbaa937b972014-12-12 17:39:12 +01001755 btrfs_set_stack_timespec_sec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001756 inode->i_mtime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001757 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001758 inode->i_mtime.tv_nsec);
1759
David Sterbaa937b972014-12-12 17:39:12 +01001760 btrfs_set_stack_timespec_sec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001761 inode->i_ctime.tv_sec);
David Sterbaa937b972014-12-12 17:39:12 +01001762 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
Miao Xie16cdcec2011-04-22 18:12:22 +08001763 inode->i_ctime.tv_nsec);
chandan r9cc97d62012-07-04 12:48:07 +05301764
1765 btrfs_set_stack_timespec_sec(&inode_item->otime,
1766 BTRFS_I(inode)->i_otime.tv_sec);
1767 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1768 BTRFS_I(inode)->i_otime.tv_nsec);
Miao Xie16cdcec2011-04-22 18:12:22 +08001769}
1770
Miao Xie2f7e33d2011-06-23 07:27:13 +00001771int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1772{
Josef Bacik9ddc9592020-01-17 09:02:22 -05001773 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001774 struct btrfs_delayed_node *delayed_node;
1775 struct btrfs_inode_item *inode_item;
Miao Xie2f7e33d2011-06-23 07:27:13 +00001776
Nikolay Borisov340c6ca2017-01-10 20:35:32 +02001777 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001778 if (!delayed_node)
1779 return -ENOENT;
1780
1781 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001782 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Miao Xie2f7e33d2011-06-23 07:27:13 +00001783 mutex_unlock(&delayed_node->mutex);
1784 btrfs_release_delayed_node(delayed_node);
1785 return -ENOENT;
1786 }
1787
1788 inode_item = &delayed_node->inode_item;
1789
Eric W. Biederman2f2f43d2012-02-10 11:05:07 -08001790 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1791 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
Nikolay Borisov6ef06d22017-02-20 13:50:34 +02001792 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
Josef Bacik9ddc9592020-01-17 09:02:22 -05001793 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1794 round_up(i_size_read(inode), fs_info->sectorsize));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001795 inode->i_mode = btrfs_stack_inode_mode(inode_item);
Miklos Szeredibfe86842011-10-28 14:13:29 +02001796 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001797 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1798 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
Yang Dongsheng6e17d302015-04-09 12:08:43 +08001799 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1800
Jeff Laytonc7f88c42017-12-11 06:35:12 -05001801 inode_set_iversion_queried(inode,
1802 btrfs_stack_inode_sequence(inode_item));
Miao Xie2f7e33d2011-06-23 07:27:13 +00001803 inode->i_rdev = 0;
1804 *rdev = btrfs_stack_inode_rdev(inode_item);
1805 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
1806
David Sterbaa937b972014-12-12 17:39:12 +01001807 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1808 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001809
David Sterbaa937b972014-12-12 17:39:12 +01001810 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1811 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001812
David Sterbaa937b972014-12-12 17:39:12 +01001813 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1814 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
Miao Xie2f7e33d2011-06-23 07:27:13 +00001815
chandan r9cc97d62012-07-04 12:48:07 +05301816 BTRFS_I(inode)->i_otime.tv_sec =
1817 btrfs_stack_timespec_sec(&inode_item->otime);
1818 BTRFS_I(inode)->i_otime.tv_nsec =
1819 btrfs_stack_timespec_nsec(&inode_item->otime);
1820
Miao Xie2f7e33d2011-06-23 07:27:13 +00001821 inode->i_generation = BTRFS_I(inode)->generation;
1822 BTRFS_I(inode)->index_cnt = (u64)-1;
1823
1824 mutex_unlock(&delayed_node->mutex);
1825 btrfs_release_delayed_node(delayed_node);
1826 return 0;
1827}
1828
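/*
 * Record an inode update in the delayed node instead of updating the
 * fs tree directly. The first update for a node reserves metadata and
 * bumps the delayed item count; subsequent updates just refresh the
 * cached inode item under the node's mutex.
 */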
Miao Xie16cdcec2011-04-22 18:12:22 +08001829int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1830 struct btrfs_root *root, struct inode *inode)
1831{
1832 struct btrfs_delayed_node *delayed_node;
David Sterbaaa0467d2011-06-03 16:29:08 +02001833 int ret = 0;
Miao Xie16cdcec2011-04-22 18:12:22 +08001834
Nikolay Borisove5517a72017-01-10 20:35:33 +02001835 delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
Miao Xie16cdcec2011-04-22 18:12:22 +08001836 if (IS_ERR(delayed_node))
1837 return PTR_ERR(delayed_node);
1838
1839 mutex_lock(&delayed_node->mutex);
Miao Xie7cf35d92013-12-26 13:07:05 +08001840 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Miao Xie16cdcec2011-04-22 18:12:22 +08001841 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
1842 goto release_node;
1843 }
1844
Nikolay Borisovfcabdd12017-01-10 20:35:34 +02001845 ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
Josef Bacik7fd2ae22011-11-08 15:47:34 -05001846 delayed_node);
Josef Bacikc06a0e12011-11-04 19:56:02 -04001847 if (ret)
1848 goto release_node;
Miao Xie16cdcec2011-04-22 18:12:22 +08001849
1850 fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
Miao Xie7cf35d92013-12-26 13:07:05 +08001851 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
Miao Xie16cdcec2011-04-22 18:12:22 +08001852 delayed_node->count++;
1853 atomic_inc(&root->fs_info->delayed_root->items);
1854release_node:
1855 mutex_unlock(&delayed_node->mutex);
1856 btrfs_release_delayed_node(delayed_node);
1857 return ret;
1858}
1859
Nikolay Borisove07222c2017-01-10 20:35:37 +02001860int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
Miao Xie67de1172013-12-26 13:07:06 +08001861{
David Sterba3ffbd682018-06-29 10:56:42 +02001862 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Miao Xie67de1172013-12-26 13:07:06 +08001863 struct btrfs_delayed_node *delayed_node;
1864
Chris Mason6f896052014-12-31 12:18:29 -05001865 /*
1866	 * We don't do delayed inode updates during log recovery because they
1867	 * lead to ENOSPC problems. This means we also can't do delayed
1868	 * inode refs.
1869 */
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001870 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
Chris Mason6f896052014-12-31 12:18:29 -05001871 return -EAGAIN;
1872
Nikolay Borisove07222c2017-01-10 20:35:37 +02001873 delayed_node = btrfs_get_or_create_delayed_node(inode);
Miao Xie67de1172013-12-26 13:07:06 +08001874 if (IS_ERR(delayed_node))
1875 return PTR_ERR(delayed_node);
1876
1877 /*
1878	 * We don't reserve space for inode ref deletion because:
1879	 * - We ONLY do async inode ref deletion for an inode that has only
1880	 *   one link (i_nlink == 1), which means there is only one inode ref.
1881	 *   In most cases the inode ref and the inode item are in the same
1882	 *   leaf, and we will deal with them at the same time. Since we are
1883	 *   sure we will reserve space for the inode item, it is unnecessary
1884	 *   to reserve space for the inode ref deletion.
1885	 * - If the inode ref and the inode item are not in the same leaf, we
1886	 *   still needn't worry about ENOSPC, because we reserve much more
1887	 *   space for the inode update than it needs.
1888	 * - In the worst case, we can steal some space from the global
1889	 *   reservation; that is very rare.
1890 */
1891 mutex_lock(&delayed_node->mutex);
1892 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1893 goto release_node;
1894
1895 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1896 delayed_node->count++;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001897 atomic_inc(&fs_info->delayed_root->items);
Miao Xie67de1172013-12-26 13:07:06 +08001898release_node:
1899 mutex_unlock(&delayed_node->mutex);
1900 btrfs_release_delayed_node(delayed_node);
1901 return 0;
1902}
1903
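/*
 * Drop everything a delayed node still holds because its items will
 * never be committed: the metadata reservations and references of all
 * pending insertion/deletion items, a pending iref deletion, and the
 * dirty inode item itself.
 */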
Miao Xie16cdcec2011-04-22 18:12:22 +08001904static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1905{
1906 struct btrfs_root *root = delayed_node->root;
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001907 struct btrfs_fs_info *fs_info = root->fs_info;
Miao Xie16cdcec2011-04-22 18:12:22 +08001908 struct btrfs_delayed_item *curr_item, *prev_item;
1909
1910 mutex_lock(&delayed_node->mutex);
1911 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1912 while (curr_item) {
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001913 btrfs_delayed_item_release_metadata(root, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001914 prev_item = curr_item;
1915 curr_item = __btrfs_next_delayed_item(prev_item);
1916 btrfs_release_delayed_item(prev_item);
1917 }
1918
1919 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
1920 while (curr_item) {
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001921 btrfs_delayed_item_release_metadata(root, curr_item);
Miao Xie16cdcec2011-04-22 18:12:22 +08001922 prev_item = curr_item;
1923 curr_item = __btrfs_next_delayed_item(prev_item);
1924 btrfs_release_delayed_item(prev_item);
1925 }
1926
Miao Xie67de1172013-12-26 13:07:06 +08001927 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1928 btrfs_release_delayed_iref(delayed_node);
1929
Miao Xie7cf35d92013-12-26 13:07:05 +08001930 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
Qu Wenruo4f5427c2017-12-12 15:34:33 +08001931 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
Miao Xie16cdcec2011-04-22 18:12:22 +08001932 btrfs_release_delayed_inode(delayed_node);
1933 }
1934 mutex_unlock(&delayed_node->mutex);
1935}
1936
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001937void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
Miao Xie16cdcec2011-04-22 18:12:22 +08001938{
1939 struct btrfs_delayed_node *delayed_node;
1940
Nikolay Borisov4ccb5c72017-01-10 20:35:38 +02001941 delayed_node = btrfs_get_delayed_node(inode);
Miao Xie16cdcec2011-04-22 18:12:22 +08001942 if (!delayed_node)
1943 return;
1944
1945 __btrfs_kill_delayed_node(delayed_node);
1946 btrfs_release_delayed_node(delayed_node);
1947}
1948
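/*
 * Tear down every delayed node belonging to @root. Nodes are found in
 * batches of up to 8 via radix tree gang lookups keyed by inode id,
 * and a reference is only taken on nodes that are still alive
 * (refcount_inc_not_zero()).
 */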
1949void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1950{
1951 u64 inode_id = 0;
1952 struct btrfs_delayed_node *delayed_nodes[8];
1953 int i, n;
1954
1955 while (1) {
1956 spin_lock(&root->inode_lock);
1957 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
1958 (void **)delayed_nodes, inode_id,
1959 ARRAY_SIZE(delayed_nodes));
1960 if (!n) {
1961 spin_unlock(&root->inode_lock);
1962 break;
1963 }
1964
1965 inode_id = delayed_nodes[n - 1]->inode_id + 1;
Josef Bacikbaf320b2019-09-26 08:29:32 -04001966 for (i = 0; i < n; i++) {
1967 /*
1968 * Don't increase refs in case the node is dead and
1969 * about to be removed from the tree in the loop below
1970 */
1971 if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
1972 delayed_nodes[i] = NULL;
1973 }
Miao Xie16cdcec2011-04-22 18:12:22 +08001974 spin_unlock(&root->inode_lock);
1975
1976 for (i = 0; i < n; i++) {
Josef Bacikbaf320b2019-09-26 08:29:32 -04001977 if (!delayed_nodes[i])
1978 continue;
Miao Xie16cdcec2011-04-22 18:12:22 +08001979 __btrfs_kill_delayed_node(delayed_nodes[i]);
1980 btrfs_release_delayed_node(delayed_nodes[i]);
1981 }
1982 }
1983}
Miao Xie67cde342012-06-14 02:23:22 -06001984
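/*
 * Teardown helper: walk the fs-wide list of delayed nodes and kill
 * each one so that no delayed item keeps its metadata reservation.
 */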
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001985void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
Miao Xie67cde342012-06-14 02:23:22 -06001986{
Miao Xie67cde342012-06-14 02:23:22 -06001987 struct btrfs_delayed_node *curr_node, *prev_node;
1988
Jeff Mahoneyccdf9b32016-06-22 18:54:23 -04001989 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
Miao Xie67cde342012-06-14 02:23:22 -06001990 while (curr_node) {
1991 __btrfs_kill_delayed_node(curr_node);
1992
1993 prev_node = curr_node;
1994 curr_node = btrfs_next_delayed_node(curr_node);
1995 btrfs_release_delayed_node(prev_node);
1996 }
1997}
1998