// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sched/mm.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"
#include "block-group.h"
#include "sysfs.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

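/*
 * The old_refcnt/new_refcnt pair is used by the qgroup accounting walks
 * together with a sequence number that acts as a per-run baseline: a refcnt
 * smaller than @seq is stale and reads as zero, otherwise the effective
 * count for the current run is refcnt - seq.
 */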
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

/*
 * glue structure to represent the relations between qgroups.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup* unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

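/*
 * Unlink @qgroup from the dirty list and from every relation list it
 * participates in, freeing the glue structures. The qgroup itself is not
 * freed here.
 */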
static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
			    struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(fs_info, qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree().
 * It doesn't use any locking, as at this point we're still single-threaded.
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;
	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
				 "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					"qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
		if (ret < 0)
			goto out;

		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				"orphan qgroup relation 0x%llx->0x%llx",
				found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	btrfs_free_path(path);
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
		btrfs_sysfs_del_qgroups(fs_info);
	}

	return ret < 0 ? ret : 0;
}

/*
 * Called in close_ctree() when quota is still enabled. This verifies that we
 * don't leak any reserved space.
 *
 * Return false if no reserved space is left.
 * Return true if some reserved space is leaked.
 */
bool btrfs_check_quota_leak(struct btrfs_fs_info *fs_info)
{
	struct rb_node *node;
	bool ret = false;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return ret;
	/*
	 * Since we're unmounting, there is no race and no need to grab the
	 * qgroup lock. And here we don't walk in post order, so that the
	 * result is sorted in a more user-friendly way.
	 */
	for (node = rb_first(&fs_info->qgroup_tree); node; node = rb_next(node)) {
		struct btrfs_qgroup *qgroup;
		int i;

		qgroup = rb_entry(node, struct btrfs_qgroup, node);
		for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++) {
			if (qgroup->rsv.values[i]) {
				ret = true;
				btrfs_warn(fs_info,
		"qgroup %hu/%llu has unreleased space, type %d rsv %llu",
					   btrfs_qgroup_level(qgroup->qgroupid),
					   btrfs_qgroup_subvolid(qgroup->qgroupid),
					   i, qgroup->rsv.values[i]);
			}
		}
	}
	return ret;
}

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable();
 * the first two are single-threaded paths. For the third one, we have already
 * set quota_root to NULL with qgroup_lock held, so it is safe to clean up the
 * in-memory structures without holding qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(fs_info, qgroup);
		btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
		kfree(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() both when unmounting the
	 * filesystem and when disabling quota, so we set qgroup_ulist to
	 * NULL here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
	btrfs_sysfs_del_qgroups(fs_info);
}

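/* Insert a BTRFS_QGROUP_RELATION_KEY item (objectid @src, offset @dst). */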
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

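/*
 * Create the on-disk items for a new qgroup: a zeroed QGROUP_INFO item and a
 * zeroed QGROUP_LIMIT item. -EEXIST is tolerated so that an existing qgroup
 * is re-initialized instead of aborting the transaction.
 */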
static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

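/* Delete both the QGROUP_INFO and the QGROUP_LIMIT items of @qgroupid. */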
static int del_qgroup_item(struct btrfs_trans_handle *trans, u64 qgroupid)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

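/* Write the in-memory limits of @qgroup back into its QGROUP_LIMIT item. */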
static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

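/*
 * Write the current qgroup status (flags, generation and rescan progress)
 * back into the QGROUP_STATUS item of the quota tree.
 */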
static int update_qgroup_status_item(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, quota_root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * Delete each leaf one by one since the whole tree is
		 * going to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

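/*
 * Enable quotas: create the quota root with its status item, add a qgroup
 * item for every existing subvolume plus one for the fs tree, then start a
 * rescan to build up the accounting.
 */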
int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *ulist = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	ulist = ulist_alloc(GFP_KERNEL);
	if (!ulist) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_sysfs_add_qgroups(fs_info);
	if (ret < 0)
		goto out;

	/*
	 * Unlock qgroup_ioctl_lock before starting the transaction. This is to
	 * avoid lock acquisition inversion problems (reported by lockdep) between
	 * qgroup_ioctl_lock and the vfs freeze semaphores, acquired when we
	 * start a transaction.
	 * After we started the transaction lock qgroup_ioctl_lock again and
	 * check if someone else created the quota root in the meanwhile. If so,
	 * just return success and release the transaction handle.
	 *
	 * Also we don't need to worry about someone else calling
	 * btrfs_sysfs_add_qgroups() after we unlock and getting an error because
	 * that function returns 0 (success) when the sysfs entries already exist.
	 */
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * Yet we also need 2*n items for a QGROUP_INFO/QGROUP_LIMIT items
	 * per subvolume. However those are not currently reserved since it
	 * would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (fs_info->quota_root)
		goto out;

	fs_info->qgroup_ulist = ulist;
	ulist = NULL;

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {

			/* Release locks on tree_root before we access quota_root */
			btrfs_release_path(path);

			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			ret = btrfs_search_slot_for_read(tree_root, &found_key,
							 path, 1, 0);
			if (ret < 0) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
			if (ret > 0) {
				/*
				 * Shouldn't happen, but in case it does we
				 * don't need to do the btrfs_next_item, just
				 * continue.
				 */
				continue;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}
	ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ret = btrfs_commit_transaction(trans);
	trans = NULL;
	if (ret)
		goto out_free_path;

	/*
	 * Set quota enabled flag after committing the transaction, to avoid
	 * deadlocks on fs_info->qgroup_ioctl_lock with concurrent snapshot
	 * creation.
	 */
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		fs_info->qgroup_rescan_running = true;
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret)
		btrfs_put_root(quota_root);
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		btrfs_sysfs_del_qgroups(fs_info);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);
	ulist_free(ulist);
	return ret;
}

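/*
 * Disable quotas: wait for a running rescan to finish, drop the in-memory
 * configuration, then empty and delete the quota tree itself.
 */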
int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;
	mutex_unlock(&fs_info->qgroup_ioctl_lock);

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 *
	 * Also, we must always start a transaction without holding the mutex
	 * qgroup_ioctl_lock, see btrfs_quota_enable().
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	if (!fs_info->quota_root)
		goto out;

	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = btrfs_del_root(trans, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	btrfs_clean_tree_block(quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	btrfs_put_root(quota_root);

out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	if (ret && trans)
		btrfs_end_transaction(trans);
	else if (trans)
		ret = btrfs_end_transaction(trans);

	return ret;
}

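/* Mark @qgroup dirty so its items get written back to the quota tree later. */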
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting: we're updating a qgroup relationship whose child
 * qgroup only has exclusive extents.
 *
 * In this case, all exclusive extents will also be exclusive for the parent,
 * so excl/rfer just get added/removed.
 *
 * The same applies to qgroup reservation space, which must also be added
 * to/removed from the parent; otherwise, when the child later releases
 * reservation space, the parent would underflow its reservation (in the
 * relationship-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}


/*
 * Quick path for updating a qgroup with only excl refs.
 *
 * In that case, just updating all parents is enough.
 * Otherwise we need to do a full rescan.
 * Caller should also hold fs_info->qgroup_lock.
 *
 * Return 0 for a quick update, return >0 if a full rescan is needed,
 * in which case the INCONSISTENT flag is set.
 * Return < 0 for other errors.
 */
static int quick_update_accounting(struct btrfs_fs_info *fs_info,
				   struct ulist *tmp, u64 src, u64 dst,
				   int sign)
{
	struct btrfs_qgroup *qgroup;
	int ret = 1;
	int err = 0;

	qgroup = find_qgroup_rb(fs_info, src);
	if (!qgroup)
		goto out;
	if (qgroup->excl == qgroup->rfer) {
		ret = 0;
		err = __qgroup_excl_accounting(fs_info, tmp, dst,
					       qgroup, sign);
		if (err < 0) {
			ret = err;
			goto out;
		}
	}
out:
	if (ret)
		fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	return ret;
}

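/*
 * Make qgroup @src a member of qgroup @dst: insert the relation items in
 * both directions, wire up the in-memory relation and, if @src only has
 * exclusive extents, propagate its counters to the new parents on the
 * quick path.
 */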
int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
			      u64 dst)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup *member;
	struct btrfs_qgroup_list *list;
	struct ulist *tmp;
	unsigned int nofs_flag;
	int ret = 0;

	/* Check the level of src and dst first */
	if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst))
		return -EINVAL;

	/* We hold a transaction handle open, must do a NOFS allocation. */
	nofs_flag = memalloc_nofs_save();
	tmp = ulist_alloc(GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!tmp)
		return -ENOMEM;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root) {
		ret = -ENOTCONN;
		goto out;
	}
	member = find_qgroup_rb(fs_info, src);
	parent = find_qgroup_rb(fs_info, dst);
	if (!member || !parent) {
		ret = -EINVAL;
		goto out;
	}

	/* check if such a qgroup relation exists first */
	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			ret = -EEXIST;
			goto out;
		}
	}

	ret = add_qgroup_relation_item(trans, src, dst);
	if (ret)
		goto out;

	ret = add_qgroup_relation_item(trans, dst, src);
	if (ret) {
		del_qgroup_relation_item(trans, src, dst);
		goto out;
	}

	spin_lock(&fs_info->qgroup_lock);
	ret = add_relation_rb(fs_info, src, dst);
	if (ret < 0) {
		spin_unlock(&fs_info->qgroup_lock);
		goto out;
	}
	ret = quick_update_accounting(fs_info, tmp, src, dst, 1);
	spin_unlock(&fs_info->qgroup_lock);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	ulist_free(tmp);
	return ret;
}

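/*
 * Remove the relation between @src and @dst. Both relation items are
 * deleted, tolerating items that are already missing, and the in-memory
 * relation and accounting are updated only if the relation actually exists.
 */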
Lu Fengqi6b36f1a2018-07-18 14:45:31 +08001425static int __del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1426 u64 dst)
Arne Jansenbed92ea2012-06-28 18:03:02 +02001427{
Lu Fengqi6b36f1a2018-07-18 14:45:31 +08001428 struct btrfs_fs_info *fs_info = trans->fs_info;
Wang Shilong534e6622013-04-17 14:49:51 +00001429 struct btrfs_qgroup *parent;
1430 struct btrfs_qgroup *member;
1431 struct btrfs_qgroup_list *list;
Qu Wenruo9c8b35b2015-02-27 16:24:27 +08001432 struct ulist *tmp;
Qu Wenruo73798c42019-08-06 22:05:07 +08001433 bool found = false;
Filipe Manana7aa6d352020-11-23 18:30:54 +00001434 unsigned int nofs_flag;
Arne Jansenbed92ea2012-06-28 18:03:02 +02001435 int ret = 0;
Qu Wenruo73798c42019-08-06 22:05:07 +08001436 int ret2;
Arne Jansenbed92ea2012-06-28 18:03:02 +02001437
Filipe Manana7aa6d352020-11-23 18:30:54 +00001438 /* We hold a transaction handle open, must do a NOFS allocation. */
1439 nofs_flag = memalloc_nofs_save();
David Sterba6602caf2017-02-13 12:41:02 +01001440 tmp = ulist_alloc(GFP_KERNEL);
Filipe Manana7aa6d352020-11-23 18:30:54 +00001441 memalloc_nofs_restore(nofs_flag);
Qu Wenruo9c8b35b2015-02-27 16:24:27 +08001442 if (!tmp)
1443 return -ENOMEM;
1444
Marcos Paulo de Souzae3b0edd2019-11-25 21:58:50 -03001445 if (!fs_info->quota_root) {
Marcos Paulo de Souza8a36e402019-11-25 21:58:51 -03001446 ret = -ENOTCONN;
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001447 goto out;
1448 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02001449
Wang Shilong534e6622013-04-17 14:49:51 +00001450 member = find_qgroup_rb(fs_info, src);
1451 parent = find_qgroup_rb(fs_info, dst);
Qu Wenruo73798c42019-08-06 22:05:07 +08001452 /*
1453	 * If the parent/member pair doesn't exist, just try to delete the
1454	 * stale relation items.
1455 */
1456 if (!member || !parent)
1457 goto delete_item;
Wang Shilong534e6622013-04-17 14:49:51 +00001458
1459	/* Check if such a qgroup relation already exists */
1460 list_for_each_entry(list, &member->groups, next_group) {
Qu Wenruo73798c42019-08-06 22:05:07 +08001461 if (list->group == parent) {
1462 found = true;
1463 break;
1464 }
Wang Shilong534e6622013-04-17 14:49:51 +00001465 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02001466
Qu Wenruo73798c42019-08-06 22:05:07 +08001467delete_item:
1468 ret = del_qgroup_relation_item(trans, src, dst);
1469 if (ret < 0 && ret != -ENOENT)
1470 goto out;
1471 ret2 = del_qgroup_relation_item(trans, dst, src);
1472 if (ret2 < 0 && ret2 != -ENOENT)
1473 goto out;
1474
1475 /* At least one deletion succeeded, return 0 */
1476 if (!ret || !ret2)
1477 ret = 0;
1478
1479 if (found) {
1480 spin_lock(&fs_info->qgroup_lock);
1481 del_relation_rb(fs_info, src, dst);
1482 ret = quick_update_accounting(fs_info, tmp, src, dst, -1);
1483 spin_unlock(&fs_info->qgroup_lock);
1484 }
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001485out:
Qu Wenruo9c8b35b2015-02-27 16:24:27 +08001486 ulist_free(tmp);
Dongsheng Yangf5a6b1c2014-11-24 10:27:09 -05001487 return ret;
1488}
1489
Lu Fengqi39616c22018-07-18 14:45:32 +08001490int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, u64 src,
1491 u64 dst)
Dongsheng Yangf5a6b1c2014-11-24 10:27:09 -05001492{
Lu Fengqi39616c22018-07-18 14:45:32 +08001493 struct btrfs_fs_info *fs_info = trans->fs_info;
Dongsheng Yangf5a6b1c2014-11-24 10:27:09 -05001494 int ret = 0;
1495
1496 mutex_lock(&fs_info->qgroup_ioctl_lock);
Lu Fengqi6b36f1a2018-07-18 14:45:31 +08001497 ret = __del_qgroup_relation(trans, src, dst);
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001498 mutex_unlock(&fs_info->qgroup_ioctl_lock);
Dongsheng Yangf5a6b1c2014-11-24 10:27:09 -05001499
Arne Jansenbed92ea2012-06-28 18:03:02 +02001500 return ret;
1501}
1502
Lu Fengqi49a05ec2018-07-18 14:45:33 +08001503int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
Arne Jansenbed92ea2012-06-28 18:03:02 +02001504{
Lu Fengqi49a05ec2018-07-18 14:45:33 +08001505 struct btrfs_fs_info *fs_info = trans->fs_info;
Arne Jansenbed92ea2012-06-28 18:03:02 +02001506 struct btrfs_root *quota_root;
1507 struct btrfs_qgroup *qgroup;
1508 int ret = 0;
1509
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001510 mutex_lock(&fs_info->qgroup_ioctl_lock);
Marcos Paulo de Souzae3b0edd2019-11-25 21:58:50 -03001511 if (!fs_info->quota_root) {
Marcos Paulo de Souza8a36e402019-11-25 21:58:51 -03001512 ret = -ENOTCONN;
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001513 goto out;
1514 }
Marcos Paulo de Souzae3b0edd2019-11-25 21:58:50 -03001515 quota_root = fs_info->quota_root;
Wang Shilong534e6622013-04-17 14:49:51 +00001516 qgroup = find_qgroup_rb(fs_info, qgroupid);
1517 if (qgroup) {
1518 ret = -EEXIST;
1519 goto out;
1520 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02001521
1522 ret = add_qgroup_item(trans, quota_root, qgroupid);
Wang Shilong534e6622013-04-17 14:49:51 +00001523 if (ret)
1524 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02001525
1526 spin_lock(&fs_info->qgroup_lock);
1527 qgroup = add_qgroup_rb(fs_info, qgroupid);
1528 spin_unlock(&fs_info->qgroup_lock);
1529
Qu Wenruo49e5fb42020-06-28 13:07:15 +08001530 if (IS_ERR(qgroup)) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02001531 ret = PTR_ERR(qgroup);
Qu Wenruo49e5fb42020-06-28 13:07:15 +08001532 goto out;
1533 }
1534 ret = btrfs_sysfs_add_one_qgroup(fs_info, qgroup);
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001535out:
1536 mutex_unlock(&fs_info->qgroup_ioctl_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02001537 return ret;
1538}
1539
Lu Fengqi3efbee12018-07-18 14:45:34 +08001540int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
Arne Jansenbed92ea2012-06-28 18:03:02 +02001541{
Lu Fengqi3efbee12018-07-18 14:45:34 +08001542 struct btrfs_fs_info *fs_info = trans->fs_info;
Arne Jansen2cf68702013-01-17 01:22:09 -07001543 struct btrfs_qgroup *qgroup;
Dongsheng Yangf5a6b1c2014-11-24 10:27:09 -05001544 struct btrfs_qgroup_list *list;
Arne Jansenbed92ea2012-06-28 18:03:02 +02001545 int ret = 0;
1546
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001547 mutex_lock(&fs_info->qgroup_ioctl_lock);
Marcos Paulo de Souzae3b0edd2019-11-25 21:58:50 -03001548 if (!fs_info->quota_root) {
Marcos Paulo de Souza8a36e402019-11-25 21:58:51 -03001549 ret = -ENOTCONN;
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001550 goto out;
1551 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02001552
Arne Jansen2cf68702013-01-17 01:22:09 -07001553 qgroup = find_qgroup_rb(fs_info, qgroupid);
Wang Shilong534e6622013-04-17 14:49:51 +00001554 if (!qgroup) {
1555 ret = -ENOENT;
1556 goto out;
Arne Jansen2cf68702013-01-17 01:22:09 -07001557 }
Lu Fengqib90e22b2018-10-11 13:42:56 +08001558
1559	/* Check that this qgroup has no children */
1560 if (!list_empty(&qgroup->members)) {
1561 ret = -EBUSY;
1562 goto out;
1563 }
1564
Lu Fengqi69104612018-07-18 14:45:26 +08001565 ret = del_qgroup_item(trans, qgroupid);
Sargun Dhillon36b96fd2017-09-17 09:02:29 +00001566 if (ret && ret != -ENOENT)
1567 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02001568
Dongsheng Yangf5a6b1c2014-11-24 10:27:09 -05001569 while (!list_empty(&qgroup->groups)) {
1570 list = list_first_entry(&qgroup->groups,
1571 struct btrfs_qgroup_list, next_group);
Lu Fengqi6b36f1a2018-07-18 14:45:31 +08001572 ret = __del_qgroup_relation(trans, qgroupid,
1573 list->group->qgroupid);
Dongsheng Yangf5a6b1c2014-11-24 10:27:09 -05001574 if (ret)
1575 goto out;
1576 }
1577
Arne Jansenbed92ea2012-06-28 18:03:02 +02001578 spin_lock(&fs_info->qgroup_lock);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001579 del_qgroup_rb(fs_info, qgroupid);
Arne Jansenbed92ea2012-06-28 18:03:02 +02001580 spin_unlock(&fs_info->qgroup_lock);
Filipe Manana0bb78832021-03-18 11:22:05 +00001581
1582 /*
1583 * Remove the qgroup from sysfs now without holding the qgroup_lock
1584 * spinlock, since the sysfs_remove_group() function needs to take
1585 * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
1586 */
1587 btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
1588 kfree(qgroup);
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001589out:
1590 mutex_unlock(&fs_info->qgroup_ioctl_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02001591 return ret;
1592}
1593
Lu Fengqif0042d52018-07-18 14:45:35 +08001594int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid,
Arne Jansenbed92ea2012-06-28 18:03:02 +02001595 struct btrfs_qgroup_limit *limit)
1596{
Lu Fengqif0042d52018-07-18 14:45:35 +08001597 struct btrfs_fs_info *fs_info = trans->fs_info;
Arne Jansenbed92ea2012-06-28 18:03:02 +02001598 struct btrfs_qgroup *qgroup;
1599 int ret = 0;
Yang Dongshengfe759902015-06-03 14:57:32 +08001600	/* Sometimes we would want to clear the limit on this qgroup.
1601	 * To meet this requirement, we treat -1 as a special value
1602	 * which tells the kernel to clear the limit on this qgroup.
1603	 */
1603 */
1604 const u64 CLEAR_VALUE = -1;
Arne Jansenbed92ea2012-06-28 18:03:02 +02001605
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001606 mutex_lock(&fs_info->qgroup_ioctl_lock);
Marcos Paulo de Souzae3b0edd2019-11-25 21:58:50 -03001607 if (!fs_info->quota_root) {
Marcos Paulo de Souza8a36e402019-11-25 21:58:51 -03001608 ret = -ENOTCONN;
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001609 goto out;
1610 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02001611
Wang Shilongddb47af2013-04-07 10:50:20 +00001612 qgroup = find_qgroup_rb(fs_info, qgroupid);
1613 if (!qgroup) {
1614 ret = -ENOENT;
1615 goto out;
1616 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02001617
Wang Shilong58400fc2013-04-07 10:50:17 +00001618 spin_lock(&fs_info->qgroup_lock);
Yang Dongshengfe759902015-06-03 14:57:32 +08001619 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) {
1620 if (limit->max_rfer == CLEAR_VALUE) {
1621 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1622 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER;
1623 qgroup->max_rfer = 0;
1624 } else {
1625 qgroup->max_rfer = limit->max_rfer;
1626 }
1627 }
1628 if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
1629 if (limit->max_excl == CLEAR_VALUE) {
1630 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1631 limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL;
1632 qgroup->max_excl = 0;
1633 } else {
1634 qgroup->max_excl = limit->max_excl;
1635 }
1636 }
1637 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) {
1638 if (limit->rsv_rfer == CLEAR_VALUE) {
1639 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1640 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER;
1641 qgroup->rsv_rfer = 0;
1642 } else {
1643 qgroup->rsv_rfer = limit->rsv_rfer;
1644 }
1645 }
1646 if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) {
1647 if (limit->rsv_excl == CLEAR_VALUE) {
1648 qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1649 limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL;
1650 qgroup->rsv_excl = 0;
1651 } else {
1652 qgroup->rsv_excl = limit->rsv_excl;
1653 }
1654 }
Dongsheng Yang03477d92015-02-06 11:06:25 -05001655 qgroup->lim_flags |= limit->flags;
1656
Arne Jansenbed92ea2012-06-28 18:03:02 +02001657 spin_unlock(&fs_info->qgroup_lock);
Dongsheng Yang1510e712014-11-20 21:01:41 -05001658
Lu Fengqiac8a8662018-07-18 14:45:27 +08001659 ret = update_qgroup_limit_item(trans, qgroup);
Dongsheng Yang1510e712014-11-20 21:01:41 -05001660 if (ret) {
1661 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1662 btrfs_info(fs_info, "unable to update quota limit for %llu",
1663 qgroupid);
1664 }
1665
Wang Shilongf2f6ed32013-04-07 10:50:16 +00001666out:
1667 mutex_unlock(&fs_info->qgroup_ioctl_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02001668 return ret;
1669}
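
/*
 * Illustrative sketch (added for exposition, not part of the original
 * file): how a transaction-context caller could ask btrfs_limit_qgroup()
 * above to drop a previously set referenced-bytes limit. Passing -1 with
 * the matching flag set means "clear this limit" rather than "set the
 * limit to -1".
 */
static inline int example_clear_rfer_limit(struct btrfs_trans_handle *trans,
					   u64 qgroupid)
{
	struct btrfs_qgroup_limit lim = {
		.flags = BTRFS_QGROUP_LIMIT_MAX_RFER,
		.max_rfer = (u64)-1,	/* CLEAR_VALUE: drop the limit */
	};

	return btrfs_limit_qgroup(trans, qgroupid, &lim);
}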
Mark Fasheh11526512014-07-17 12:39:01 -07001670
Qu Wenruo50b3e042016-10-18 09:31:27 +08001671int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
Qu Wenruocb93b522016-08-15 10:36:50 +08001672 struct btrfs_delayed_ref_root *delayed_refs,
1673 struct btrfs_qgroup_extent_record *record)
Qu Wenruo3368d002015-04-16 14:34:17 +08001674{
1675 struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
1676 struct rb_node *parent_node = NULL;
1677 struct btrfs_qgroup_extent_record *entry;
1678 u64 bytenr = record->bytenr;
1679
David Sterbaa4666e62018-03-16 02:21:22 +01001680 lockdep_assert_held(&delayed_refs->lock);
Qu Wenruo50b3e042016-10-18 09:31:27 +08001681 trace_btrfs_qgroup_trace_extent(fs_info, record);
Mark Fasheh82bd1012015-11-05 14:38:00 -08001682
Qu Wenruo3368d002015-04-16 14:34:17 +08001683 while (*p) {
1684 parent_node = *p;
1685 entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record,
1686 node);
Qu Wenruo1418bae2019-01-23 15:15:12 +08001687 if (bytenr < entry->bytenr) {
Qu Wenruo3368d002015-04-16 14:34:17 +08001688 p = &(*p)->rb_left;
Qu Wenruo1418bae2019-01-23 15:15:12 +08001689 } else if (bytenr > entry->bytenr) {
Qu Wenruo3368d002015-04-16 14:34:17 +08001690 p = &(*p)->rb_right;
Qu Wenruo1418bae2019-01-23 15:15:12 +08001691 } else {
1692 if (record->data_rsv && !entry->data_rsv) {
1693 entry->data_rsv = record->data_rsv;
1694 entry->data_rsv_refroot =
1695 record->data_rsv_refroot;
1696 }
Qu Wenruocb93b522016-08-15 10:36:50 +08001697 return 1;
Qu Wenruo1418bae2019-01-23 15:15:12 +08001698 }
Qu Wenruo3368d002015-04-16 14:34:17 +08001699 }
1700
1701 rb_link_node(&record->node, parent_node, p);
1702 rb_insert_color(&record->node, &delayed_refs->dirty_extent_root);
Qu Wenruocb93b522016-08-15 10:36:50 +08001703 return 0;
1704}
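
/*
 * Illustrative usage sketch (added for exposition): the dirty extent tree
 * is keyed by bytenr, so tracing the same extent twice in one transaction
 * folds into a single record. A return value of 1 means the record was not
 * inserted and the caller keeps ownership of it, as in the pattern used by
 * btrfs_qgroup_trace_extent() below:
 *
 *	spin_lock(&delayed_refs->lock);
 *	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
 *	spin_unlock(&delayed_refs->lock);
 *	if (ret > 0)
 *		kfree(record);	(bytenr was already tracked)
 */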
1705
Qu Wenruofb235dc2017-02-15 10:43:03 +08001706int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info,
1707 struct btrfs_qgroup_extent_record *qrecord)
1708{
1709 struct ulist *old_root;
1710 u64 bytenr = qrecord->bytenr;
1711 int ret;
1712
Zygo Blaxellc995ab32017-09-22 13:58:45 -04001713 ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false);
Nikolay Borisov952bd3db2018-01-29 15:53:01 +02001714 if (ret < 0) {
1715 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
1716 btrfs_warn(fs_info,
1717"error accounting new delayed refs extent (err code: %d), quota inconsistent",
1718 ret);
1719 return 0;
1720 }
Qu Wenruofb235dc2017-02-15 10:43:03 +08001721
1722 /*
1723	 * Here we don't need to take the lock of
1724	 * trans->transaction->delayed_refs, since the inserted qrecord won't
1725	 * be deleted; only qrecord->node may be modified (by a new insert).
1726	 *
1727	 * So modifying qrecord->old_roots is safe here.
1728 */
1729 qrecord->old_roots = old_root;
1730 return 0;
1731}
1732
Lu Fengqia95f3aa2018-07-18 16:28:03 +08001733int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, u64 bytenr,
1734 u64 num_bytes, gfp_t gfp_flag)
Qu Wenruocb93b522016-08-15 10:36:50 +08001735{
Lu Fengqia95f3aa2018-07-18 16:28:03 +08001736 struct btrfs_fs_info *fs_info = trans->fs_info;
Qu Wenruocb93b522016-08-15 10:36:50 +08001737 struct btrfs_qgroup_extent_record *record;
1738 struct btrfs_delayed_ref_root *delayed_refs;
1739 int ret;
1740
Josef Bacikafcdd122016-09-02 15:40:02 -04001741 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)
1742 || bytenr == 0 || num_bytes == 0)
Qu Wenruocb93b522016-08-15 10:36:50 +08001743 return 0;
Qu Wenruo1418bae2019-01-23 15:15:12 +08001744 record = kzalloc(sizeof(*record), gfp_flag);
Qu Wenruocb93b522016-08-15 10:36:50 +08001745 if (!record)
1746 return -ENOMEM;
1747
1748 delayed_refs = &trans->transaction->delayed_refs;
1749 record->bytenr = bytenr;
1750 record->num_bytes = num_bytes;
1751 record->old_roots = NULL;
1752
1753 spin_lock(&delayed_refs->lock);
Jeff Mahoney2ff7e612016-06-22 18:54:24 -04001754 ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
Qu Wenruocb93b522016-08-15 10:36:50 +08001755 spin_unlock(&delayed_refs->lock);
Qu Wenruofb235dc2017-02-15 10:43:03 +08001756 if (ret > 0) {
Qu Wenruocb93b522016-08-15 10:36:50 +08001757 kfree(record);
Qu Wenruofb235dc2017-02-15 10:43:03 +08001758 return 0;
1759 }
1760 return btrfs_qgroup_trace_extent_post(fs_info, record);
Qu Wenruo3368d002015-04-16 14:34:17 +08001761}
1762
Qu Wenruo33d1f052016-10-18 09:31:28 +08001763int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
Qu Wenruo33d1f052016-10-18 09:31:28 +08001764 struct extent_buffer *eb)
1765{
Lu Fengqi8d38d7e2018-07-18 14:45:37 +08001766 struct btrfs_fs_info *fs_info = trans->fs_info;
Qu Wenruo33d1f052016-10-18 09:31:28 +08001767 int nr = btrfs_header_nritems(eb);
1768 int i, extent_type, ret;
1769 struct btrfs_key key;
1770 struct btrfs_file_extent_item *fi;
1771 u64 bytenr, num_bytes;
1772
1773 /* We can be called directly from walk_up_proc() */
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001774 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
Qu Wenruo33d1f052016-10-18 09:31:28 +08001775 return 0;
1776
1777 for (i = 0; i < nr; i++) {
1778 btrfs_item_key_to_cpu(eb, &key, i);
1779
1780 if (key.type != BTRFS_EXTENT_DATA_KEY)
1781 continue;
1782
1783 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
1784		/* Filter out non-qgroup-accountable extents */
1785 extent_type = btrfs_file_extent_type(eb, fi);
1786
1787 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1788 continue;
1789
1790 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
1791 if (!bytenr)
1792 continue;
1793
1794 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
1795
Lu Fengqia95f3aa2018-07-18 16:28:03 +08001796 ret = btrfs_qgroup_trace_extent(trans, bytenr, num_bytes,
1797 GFP_NOFS);
Qu Wenruo33d1f052016-10-18 09:31:28 +08001798 if (ret)
1799 return ret;
1800 }
Jeff Mahoneycddf3b22017-06-20 08:15:26 -04001801 cond_resched();
Qu Wenruo33d1f052016-10-18 09:31:28 +08001802 return 0;
1803}
1804
1805/*
1806 * Walk up the tree from the bottom, freeing leaves and any interior
1807 * nodes which have had all slots visited. If a node (leaf or
1808 * interior) is freed, the node above it will have its slot
1809 * incremented. The root node will never be freed.
1810 *
1811 * At the end of this function, we should have a path which has all
1812 * slots incremented to the next position for a search. If we need to
1813 * read a new node it will be NULL and the node above it will have the
1814 * correct slot selected for a later read.
1815 *
1816 * If we increment the root node's slot counter past the number of
1817 * elements, 1 is returned to signal completion of the search.
1818 */
David Sterba15b34512017-02-10 20:30:23 +01001819static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
Qu Wenruo33d1f052016-10-18 09:31:28 +08001820{
1821 int level = 0;
1822 int nr, slot;
1823 struct extent_buffer *eb;
1824
1825 if (root_level == 0)
1826 return 1;
1827
1828 while (level <= root_level) {
1829 eb = path->nodes[level];
1830 nr = btrfs_header_nritems(eb);
1831 path->slots[level]++;
1832 slot = path->slots[level];
1833 if (slot >= nr || level == 0) {
1834 /*
1835 * Don't free the root - we will detect this
1836 * condition after our loop and return a
1837 * positive value for caller to stop walking the tree.
1838 */
1839 if (level != root_level) {
1840 btrfs_tree_unlock_rw(eb, path->locks[level]);
1841 path->locks[level] = 0;
1842
1843 free_extent_buffer(eb);
1844 path->nodes[level] = NULL;
1845 path->slots[level] = 0;
1846 }
1847 } else {
1848 /*
1849 * We have a valid slot to walk back down
1850 * from. Stop here so caller can process these
1851 * new nodes.
1852 */
1853 break;
1854 }
1855
1856 level++;
1857 }
1858
1859 eb = path->nodes[root_level];
1860 if (path->slots[root_level] >= btrfs_header_nritems(eb))
1861 return 1;
1862
1863 return 0;
1864}
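
/*
 * Worked example (added for exposition, hypothetical shapes): with
 * root_level = 1 and the walk positioned at a fully processed leaf,
 * adjust_slots_upwards() bumps slots[0], sees level 0, drops the leaf,
 * then bumps slots[1]. If the root still has unread children it returns 0
 * with slots[1] pointing at the next child to read; once slots[1] reaches
 * nritems(root) it returns 1 and the search is complete.
 */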
1865
Qu Wenruo25982562018-09-27 14:42:30 +08001866/*
1867 * Helper function to trace a subtree tree block swap.
1868 *
1869 * The swap will happen in the highest tree block, but there may be a lot of
1870 * tree blocks involved.
1871 *
1872 * For example:
1873 * OO = Old tree blocks
1874 * NN = New tree blocks allocated during balance
1875 *
1876 * File tree (257) Reloc tree for 257
1877 * L2 OO NN
1878 * / \ / \
1879 * L1 OO OO (a) OO NN (a)
1880 * / \ / \ / \ / \
1881 * L0 OO OO OO OO OO OO NN NN
1882 * (b) (c) (b) (c)
1883 *
1884 * When calling qgroup_trace_extent_swap(), we will pass:
1885 * @src_eb = OO(a)
1886 * @dst_path = [ nodes[1] = NN(a), nodes[0] = NN(c) ]
1887 * @dst_level = 0
1888 * @root_level = 1
1889 *
1890 * In that case, qgroup_trace_extent_swap() will search from OO(a) to
1891 * reach OO(c), then mark both OO(c) and NN(c) as qgroup dirty.
1892 *
1893 * The main work of qgroup_trace_extent_swap() can be split into 3 parts:
1894 *
1895 * 1) Tree search from @src_eb
1896 * It should act as a simplified btrfs_search_slot().
1897 * The key for search can be extracted from @dst_path->nodes[dst_level]
1898 * (first key).
1899 *
1900 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
1901 * NOTE: In the above case, OO(a) and NN(a) won't be marked qgroup dirty.
Andrea Gelmini52042d82018-11-28 12:05:13 +01001902 * They should be marked during the previous (@dst_level = 1) iteration.
Qu Wenruo25982562018-09-27 14:42:30 +08001903 *
1904 * 3) Mark file extents in leaves dirty
1905 * We don't have a good way to pick out only the new file extents.
1906 * So we still follow the old method of scanning all file extents in
1907 * the leaf.
1908 *
Andrea Gelmini52042d82018-11-28 12:05:13 +01001909 * This function frees us from keeping two paths, thus later we only need
Qu Wenruo25982562018-09-27 14:42:30 +08001910 * to care about how to iterate all new tree blocks in reloc tree.
1911 */
1912static int qgroup_trace_extent_swap(struct btrfs_trans_handle *trans,
1913 struct extent_buffer *src_eb,
1914 struct btrfs_path *dst_path,
Qu Wenruo3d0174f2018-09-27 14:42:35 +08001915 int dst_level, int root_level,
1916 bool trace_leaf)
Qu Wenruo25982562018-09-27 14:42:30 +08001917{
1918 struct btrfs_key key;
1919 struct btrfs_path *src_path;
1920 struct btrfs_fs_info *fs_info = trans->fs_info;
1921 u32 nodesize = fs_info->nodesize;
1922 int cur_level = root_level;
1923 int ret;
1924
1925 BUG_ON(dst_level > root_level);
1926 /* Level mismatch */
1927 if (btrfs_header_level(src_eb) != root_level)
1928 return -EINVAL;
1929
1930 src_path = btrfs_alloc_path();
1931 if (!src_path) {
1932 ret = -ENOMEM;
1933 goto out;
1934 }
1935
1936 if (dst_level)
1937 btrfs_node_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
1938 else
1939 btrfs_item_key_to_cpu(dst_path->nodes[dst_level], &key, 0);
1940
1941 /* For src_path */
David Sterba67439da2019-10-08 13:28:47 +02001942 atomic_inc(&src_eb->refs);
Qu Wenruo25982562018-09-27 14:42:30 +08001943 src_path->nodes[root_level] = src_eb;
1944 src_path->slots[root_level] = dst_path->slots[root_level];
1945 src_path->locks[root_level] = 0;
1946
1947 /* A simplified version of btrfs_search_slot() */
1948 while (cur_level >= dst_level) {
1949 struct btrfs_key src_key;
1950 struct btrfs_key dst_key;
1951
1952 if (src_path->nodes[cur_level] == NULL) {
Qu Wenruo25982562018-09-27 14:42:30 +08001953 struct extent_buffer *eb;
1954 int parent_slot;
Qu Wenruo25982562018-09-27 14:42:30 +08001955
1956 eb = src_path->nodes[cur_level + 1];
1957 parent_slot = src_path->slots[cur_level + 1];
Qu Wenruo25982562018-09-27 14:42:30 +08001958
Josef Bacik6b2cb7c2020-11-05 10:45:15 -05001959 eb = btrfs_read_node_slot(eb, parent_slot);
Qu Wenruo25982562018-09-27 14:42:30 +08001960 if (IS_ERR(eb)) {
1961 ret = PTR_ERR(eb);
1962 goto out;
Qu Wenruo25982562018-09-27 14:42:30 +08001963 }
1964
1965 src_path->nodes[cur_level] = eb;
1966
1967 btrfs_tree_read_lock(eb);
Josef Bacikac5887c2020-08-20 11:46:10 -04001968 src_path->locks[cur_level] = BTRFS_READ_LOCK;
Qu Wenruo25982562018-09-27 14:42:30 +08001969 }
1970
1971 src_path->slots[cur_level] = dst_path->slots[cur_level];
1972 if (cur_level) {
1973 btrfs_node_key_to_cpu(dst_path->nodes[cur_level],
1974 &dst_key, dst_path->slots[cur_level]);
1975 btrfs_node_key_to_cpu(src_path->nodes[cur_level],
1976 &src_key, src_path->slots[cur_level]);
1977 } else {
1978 btrfs_item_key_to_cpu(dst_path->nodes[cur_level],
1979 &dst_key, dst_path->slots[cur_level]);
1980 btrfs_item_key_to_cpu(src_path->nodes[cur_level],
1981 &src_key, src_path->slots[cur_level]);
1982 }
1983 /* Content mismatch, something went wrong */
1984 if (btrfs_comp_cpu_keys(&dst_key, &src_key)) {
1985 ret = -ENOENT;
1986 goto out;
1987 }
1988 cur_level--;
1989 }
1990
1991 /*
1992 * Now both @dst_path and @src_path have been populated, record the tree
1993 * blocks for qgroup accounting.
1994 */
1995 ret = btrfs_qgroup_trace_extent(trans, src_path->nodes[dst_level]->start,
1996 nodesize, GFP_NOFS);
1997 if (ret < 0)
1998 goto out;
1999 ret = btrfs_qgroup_trace_extent(trans,
2000 dst_path->nodes[dst_level]->start,
2001 nodesize, GFP_NOFS);
2002 if (ret < 0)
2003 goto out;
2004
2005 /* Record leaf file extents */
Qu Wenruo3d0174f2018-09-27 14:42:35 +08002006 if (dst_level == 0 && trace_leaf) {
Qu Wenruo25982562018-09-27 14:42:30 +08002007 ret = btrfs_qgroup_trace_leaf_items(trans, src_path->nodes[0]);
2008 if (ret < 0)
2009 goto out;
2010 ret = btrfs_qgroup_trace_leaf_items(trans, dst_path->nodes[0]);
2011 }
2012out:
2013 btrfs_free_path(src_path);
2014 return ret;
2015}
2016
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002017/*
2018 * Helper function to do a recursive generation-aware depth-first search, to
2019 * locate all new tree blocks in a subtree of the reloc tree.
2020 *
2021 * E.g. (OO = Old tree blocks, NN = New tree blocks, whose gen == last_snapshot)
2022 * reloc tree
2023 * L2 NN (a)
2024 * / \
2025 * L1 OO NN (b)
2026 * / \ / \
2027 * L0 OO OO OO NN
2028 * (c) (d)
2029 * If we pass:
2030 * @dst_path = [ nodes[1] = NN(b), nodes[0] = NULL ],
2031 * @cur_level = 1
2032 * @root_level = 1
2033 *
2034 * We will iterate through tree blocks NN(b), NN(d) and inform qgroup to trace
2035 * the above tree blocks along with their counterparts in the file tree.
Andrea Gelmini52042d82018-11-28 12:05:13 +01002036 * During the search, old tree blocks like OO(c) will be skipped, as the tree
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002037 * block swap won't affect OO(c).
2038 */
2039static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle *trans,
2040 struct extent_buffer *src_eb,
2041 struct btrfs_path *dst_path,
2042 int cur_level, int root_level,
Qu Wenruo3d0174f2018-09-27 14:42:35 +08002043 u64 last_snapshot, bool trace_leaf)
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002044{
2045 struct btrfs_fs_info *fs_info = trans->fs_info;
2046 struct extent_buffer *eb;
2047 bool need_cleanup = false;
2048 int ret = 0;
2049 int i;
2050
2051 /* Level sanity check */
Nikolay Borisov7ff2c2a2019-03-18 17:45:19 +02002052 if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2053 root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002054 root_level < cur_level) {
2055 btrfs_err_rl(fs_info,
2056 "%s: bad levels, cur_level=%d root_level=%d",
2057 __func__, cur_level, root_level);
2058 return -EUCLEAN;
2059 }
2060
2061 /* Read the tree block if needed */
2062 if (dst_path->nodes[cur_level] == NULL) {
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002063 int parent_slot;
2064 u64 child_gen;
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002065
2066 /*
2067 * dst_path->nodes[root_level] must be initialized before
2068 * calling this function.
2069 */
2070 if (cur_level == root_level) {
2071 btrfs_err_rl(fs_info,
2072 "%s: dst_path->nodes[%d] not initialized, root_level=%d cur_level=%d",
2073 __func__, root_level, root_level, cur_level);
2074 return -EUCLEAN;
2075 }
2076
2077 /*
2078 * We need to get child blockptr/gen from parent before we can
2079 * read it.
2080 */
2081 eb = dst_path->nodes[cur_level + 1];
2082 parent_slot = dst_path->slots[cur_level + 1];
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002083 child_gen = btrfs_node_ptr_generation(eb, parent_slot);
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002084
2085 /* This node is old, no need to trace */
2086 if (child_gen < last_snapshot)
2087 goto out;
2088
Josef Bacik3acfbd62020-11-05 10:45:16 -05002089 eb = btrfs_read_node_slot(eb, parent_slot);
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002090 if (IS_ERR(eb)) {
2091 ret = PTR_ERR(eb);
2092 goto out;
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002093 }
2094
2095 dst_path->nodes[cur_level] = eb;
2096 dst_path->slots[cur_level] = 0;
2097
2098 btrfs_tree_read_lock(eb);
Josef Bacikac5887c2020-08-20 11:46:10 -04002099 dst_path->locks[cur_level] = BTRFS_READ_LOCK;
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002100 need_cleanup = true;
2101 }
2102
2103	/* Now record this tree block and its counterpart for qgroups */
2104 ret = qgroup_trace_extent_swap(trans, src_eb, dst_path, cur_level,
Qu Wenruo3d0174f2018-09-27 14:42:35 +08002105 root_level, trace_leaf);
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002106 if (ret < 0)
2107 goto cleanup;
2108
2109 eb = dst_path->nodes[cur_level];
2110
2111 if (cur_level > 0) {
2112 /* Iterate all child tree blocks */
2113 for (i = 0; i < btrfs_header_nritems(eb); i++) {
2114 /* Skip old tree blocks as they won't be swapped */
2115 if (btrfs_node_ptr_generation(eb, i) < last_snapshot)
2116 continue;
2117 dst_path->slots[cur_level] = i;
2118
2119 /* Recursive call (at most 7 times) */
2120 ret = qgroup_trace_new_subtree_blocks(trans, src_eb,
2121 dst_path, cur_level - 1, root_level,
Qu Wenruo3d0174f2018-09-27 14:42:35 +08002122 last_snapshot, trace_leaf);
Qu Wenruoea49f3e2018-09-27 14:42:31 +08002123 if (ret < 0)
2124 goto cleanup;
2125 }
2126 }
2127
2128cleanup:
2129 if (need_cleanup) {
2130 /* Clean up */
2131 btrfs_tree_unlock_rw(dst_path->nodes[cur_level],
2132 dst_path->locks[cur_level]);
2133 free_extent_buffer(dst_path->nodes[cur_level]);
2134 dst_path->nodes[cur_level] = NULL;
2135 dst_path->slots[cur_level] = 0;
2136 dst_path->locks[cur_level] = 0;
2137 }
2138out:
2139 return ret;
2140}
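
/*
 * Worked example (added for exposition, hypothetical generations, with
 * last_snapshot = 100): in the diagram above, NN(a) has generation 102 and
 * is visited; the old L1 block has generation 97 < 100, so its whole
 * subtree, including OO(c), is pruned without ever being read; NN(b) has
 * generation 101 and the recursion continues down to NN(d).
 */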
2141
Qu Wenruo5aea1a42019-01-23 15:15:15 +08002142static int qgroup_trace_subtree_swap(struct btrfs_trans_handle *trans,
2143 struct extent_buffer *src_eb,
2144 struct extent_buffer *dst_eb,
2145 u64 last_snapshot, bool trace_leaf)
2146{
2147 struct btrfs_fs_info *fs_info = trans->fs_info;
2148 struct btrfs_path *dst_path = NULL;
2149 int level;
2150 int ret;
2151
2152 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
2153 return 0;
2154
2155 /* Wrong parameter order */
2156 if (btrfs_header_generation(src_eb) > btrfs_header_generation(dst_eb)) {
2157 btrfs_err_rl(fs_info,
2158 "%s: bad parameter order, src_gen=%llu dst_gen=%llu", __func__,
2159 btrfs_header_generation(src_eb),
2160 btrfs_header_generation(dst_eb));
2161 return -EUCLEAN;
2162 }
2163
2164 if (!extent_buffer_uptodate(src_eb) || !extent_buffer_uptodate(dst_eb)) {
2165 ret = -EIO;
2166 goto out;
2167 }
2168
2169 level = btrfs_header_level(dst_eb);
2170 dst_path = btrfs_alloc_path();
2171 if (!dst_path) {
2172 ret = -ENOMEM;
2173 goto out;
2174 }
2175 /* For dst_path */
David Sterba67439da2019-10-08 13:28:47 +02002176 atomic_inc(&dst_eb->refs);
Qu Wenruo5aea1a42019-01-23 15:15:15 +08002177 dst_path->nodes[level] = dst_eb;
2178 dst_path->slots[level] = 0;
2179 dst_path->locks[level] = 0;
2180
2181	/* Do the generation-aware depth-first search */
2182 ret = qgroup_trace_new_subtree_blocks(trans, src_eb, dst_path, level,
2183 level, last_snapshot, trace_leaf);
2184 if (ret < 0)
2185 goto out;
2186 ret = 0;
2187
2188out:
2189 btrfs_free_path(dst_path);
2190 if (ret < 0)
2191 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2192 return ret;
2193}
2194
Qu Wenruo33d1f052016-10-18 09:31:28 +08002195int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
Qu Wenruo33d1f052016-10-18 09:31:28 +08002196 struct extent_buffer *root_eb,
2197 u64 root_gen, int root_level)
2198{
Lu Fengqideb40622018-07-18 14:45:38 +08002199 struct btrfs_fs_info *fs_info = trans->fs_info;
Qu Wenruo33d1f052016-10-18 09:31:28 +08002200 int ret = 0;
2201 int level;
2202 struct extent_buffer *eb = root_eb;
2203 struct btrfs_path *path = NULL;
2204
Nikolay Borisovb6e6bca2017-07-12 09:42:19 +03002205 BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL);
Qu Wenruo33d1f052016-10-18 09:31:28 +08002206 BUG_ON(root_eb == NULL);
2207
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002208 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
Qu Wenruo33d1f052016-10-18 09:31:28 +08002209 return 0;
2210
2211 if (!extent_buffer_uptodate(root_eb)) {
Qu Wenruo581c1762018-03-29 09:08:11 +08002212 ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL);
Qu Wenruo33d1f052016-10-18 09:31:28 +08002213 if (ret)
2214 goto out;
2215 }
2216
2217 if (root_level == 0) {
Lu Fengqi8d38d7e2018-07-18 14:45:37 +08002218 ret = btrfs_qgroup_trace_leaf_items(trans, root_eb);
Qu Wenruo33d1f052016-10-18 09:31:28 +08002219 goto out;
2220 }
2221
2222 path = btrfs_alloc_path();
2223 if (!path)
2224 return -ENOMEM;
2225
2226 /*
2227 * Walk down the tree. Missing extent blocks are filled in as
2228 * we go. Metadata is accounted every time we read a new
2229 * extent block.
2230 *
2231 * When we reach a leaf, we account for file extent items in it,
2232 * walk back up the tree (adjusting slot pointers as we go)
2233 * and restart the search process.
2234 */
David Sterba67439da2019-10-08 13:28:47 +02002235 atomic_inc(&root_eb->refs); /* For path */
Qu Wenruo33d1f052016-10-18 09:31:28 +08002236 path->nodes[root_level] = root_eb;
2237 path->slots[root_level] = 0;
2238 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
2239walk_down:
2240 level = root_level;
2241 while (level >= 0) {
2242 if (path->nodes[level] == NULL) {
2243 int parent_slot;
Qu Wenruo33d1f052016-10-18 09:31:28 +08002244 u64 child_bytenr;
2245
2246 /*
Josef Bacik182c79f2020-11-05 10:45:17 -05002247 * We need to get child blockptr from parent before we
2248 * can read it.
Qu Wenruo33d1f052016-10-18 09:31:28 +08002249 */
2250 eb = path->nodes[level + 1];
2251 parent_slot = path->slots[level + 1];
2252 child_bytenr = btrfs_node_blockptr(eb, parent_slot);
Qu Wenruo33d1f052016-10-18 09:31:28 +08002253
Josef Bacik182c79f2020-11-05 10:45:17 -05002254 eb = btrfs_read_node_slot(eb, parent_slot);
Qu Wenruo33d1f052016-10-18 09:31:28 +08002255 if (IS_ERR(eb)) {
2256 ret = PTR_ERR(eb);
2257 goto out;
Qu Wenruo33d1f052016-10-18 09:31:28 +08002258 }
2259
2260 path->nodes[level] = eb;
2261 path->slots[level] = 0;
2262
2263 btrfs_tree_read_lock(eb);
Josef Bacikac5887c2020-08-20 11:46:10 -04002264 path->locks[level] = BTRFS_READ_LOCK;
Qu Wenruo33d1f052016-10-18 09:31:28 +08002265
Lu Fengqia95f3aa2018-07-18 16:28:03 +08002266 ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002267 fs_info->nodesize,
2268 GFP_NOFS);
Qu Wenruo33d1f052016-10-18 09:31:28 +08002269 if (ret)
2270 goto out;
2271 }
2272
2273 if (level == 0) {
Lu Fengqi8d38d7e2018-07-18 14:45:37 +08002274 ret = btrfs_qgroup_trace_leaf_items(trans,
2275 path->nodes[level]);
Qu Wenruo33d1f052016-10-18 09:31:28 +08002276 if (ret)
2277 goto out;
2278
2279 /* Nonzero return here means we completed our search */
David Sterba15b34512017-02-10 20:30:23 +01002280 ret = adjust_slots_upwards(path, root_level);
Qu Wenruo33d1f052016-10-18 09:31:28 +08002281 if (ret)
2282 break;
2283
2284 /* Restart search with new slots */
2285 goto walk_down;
2286 }
2287
2288 level--;
2289 }
2290
2291 ret = 0;
2292out:
2293 btrfs_free_path(path);
2294
2295 return ret;
2296}
2297
Qu Wenruod810ef22015-04-12 16:52:34 +08002298#define UPDATE_NEW 0
2299#define UPDATE_OLD 1
2300/*
2301 * Walk all of the roots that point to the bytenr and adjust their refcnts.
2302 */
2303static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
2304 struct ulist *roots, struct ulist *tmp,
2305 struct ulist *qgroups, u64 seq, int update_old)
2306{
2307 struct ulist_node *unode;
2308 struct ulist_iterator uiter;
2309 struct ulist_node *tmp_unode;
2310 struct ulist_iterator tmp_uiter;
2311 struct btrfs_qgroup *qg;
2312 int ret = 0;
2313
2314 if (!roots)
2315 return 0;
2316 ULIST_ITER_INIT(&uiter);
2317 while ((unode = ulist_next(roots, &uiter))) {
2318 qg = find_qgroup_rb(fs_info, unode->val);
2319 if (!qg)
2320 continue;
2321
2322 ulist_reinit(tmp);
David Sterbaef2fff62016-10-26 16:23:50 +02002323 ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
Qu Wenruod810ef22015-04-12 16:52:34 +08002324 GFP_ATOMIC);
2325 if (ret < 0)
2326 return ret;
David Sterbaef2fff62016-10-26 16:23:50 +02002327 ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
Qu Wenruod810ef22015-04-12 16:52:34 +08002328 if (ret < 0)
2329 return ret;
2330 ULIST_ITER_INIT(&tmp_uiter);
2331 while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
2332 struct btrfs_qgroup_list *glist;
2333
David Sterbaef2fff62016-10-26 16:23:50 +02002334 qg = unode_aux_to_qgroup(tmp_unode);
Qu Wenruod810ef22015-04-12 16:52:34 +08002335 if (update_old)
2336 btrfs_qgroup_update_old_refcnt(qg, seq, 1);
2337 else
2338 btrfs_qgroup_update_new_refcnt(qg, seq, 1);
2339 list_for_each_entry(glist, &qg->groups, next_group) {
2340 ret = ulist_add(qgroups, glist->group->qgroupid,
David Sterbaef2fff62016-10-26 16:23:50 +02002341 qgroup_to_aux(glist->group),
Qu Wenruod810ef22015-04-12 16:52:34 +08002342 GFP_ATOMIC);
2343 if (ret < 0)
2344 return ret;
2345 ret = ulist_add(tmp, glist->group->qgroupid,
David Sterbaef2fff62016-10-26 16:23:50 +02002346 qgroup_to_aux(glist->group),
Qu Wenruod810ef22015-04-12 16:52:34 +08002347 GFP_ATOMIC);
2348 if (ret < 0)
2349 return ret;
2350 }
2351 }
2352 }
2353 return 0;
2354}
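
/*
 * Worked example (added for exposition, hypothetical ids): an extent's
 * old_roots = {257, 258}, and both 0/257 and 0/258 are members of 1/100.
 * After the UPDATE_OLD pass, the old refcnts at @seq are:
 *
 *	0/257 = 1,  0/258 = 1,  1/100 = 2
 *
 * Since nr_old_roots is also 2, qgroup_update_counters() below will treat
 * 1/100 as a possible exclusive owner while each subvolume qgroup is
 * clearly shared.
 */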
2355
Josef Bacikfcebe452014-05-13 17:30:47 -07002356/*
Qu Wenruo823ae5b2015-04-12 16:59:57 +08002357 * Update qgroup rfer/excl counters.
2358 * Rfer update is easy, the code explains itself.
Qu Wenruoe69bcee2015-04-17 10:23:16 +08002359 *
Randy Dunlap260db432020-08-04 19:48:34 -07002360 * Excl update is tricky, the update is split into 2 parts.
Qu Wenruo823ae5b2015-04-12 16:59:57 +08002361 * Part 1: Possible exclusive <-> sharing detect:
2362 * | A | !A |
2363 * -------------------------------------
2364 * B | * | - |
2365 * -------------------------------------
2366 * !B | + | ** |
2367 * -------------------------------------
2368 *
2369 * Conditions:
2370 * A: cur_old_roots < nr_old_roots (not exclusive before)
2371 * !A: cur_old_roots == nr_old_roots (possible exclusive before)
2372 * B: cur_new_roots < nr_new_roots (not exclusive now)
Nicholas D Steeves01327612016-05-19 21:18:45 -04002373 * !B: cur_new_roots == nr_new_roots (possible exclusive now)
Qu Wenruo823ae5b2015-04-12 16:59:57 +08002374 *
2375 * Results:
2376 * +: Possible sharing -> exclusive -: Possible exclusive -> sharing
2377 * *: Definitely not changed. **: Possible unchanged.
2378 *
2379 * For !A and !B condition, the exception is cur_old/new_roots == 0 case.
2380 *
2381 * To make the logic clear, we first use conditions A and B to split the
2382 * combination into 4 results.
2383 *
2384 * Then, for results "+" and "-", check the old/new_roots == 0 case, as in
2385 * them only one variant may be 0.
2386 *
2387 * Lastly, check result **, since there are 2 variants that may be 0, split
2388 * them again (2x2).
2389 * But this time we don't need to consider other things; the code and logic
2390 * are easy to understand now.
2391 */
2392static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
2393 struct ulist *qgroups,
2394 u64 nr_old_roots,
2395 u64 nr_new_roots,
2396 u64 num_bytes, u64 seq)
2397{
2398 struct ulist_node *unode;
2399 struct ulist_iterator uiter;
2400 struct btrfs_qgroup *qg;
2401 u64 cur_new_count, cur_old_count;
2402
2403 ULIST_ITER_INIT(&uiter);
2404 while ((unode = ulist_next(qgroups, &uiter))) {
2405 bool dirty = false;
2406
David Sterbaef2fff62016-10-26 16:23:50 +02002407 qg = unode_aux_to_qgroup(unode);
Qu Wenruo823ae5b2015-04-12 16:59:57 +08002408 cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
2409 cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
2410
Qu Wenruo8b317902018-04-30 15:04:44 +08002411 trace_qgroup_update_counters(fs_info, qg, cur_old_count,
2412 cur_new_count);
Mark Fasheh0f5dcf82016-03-29 17:19:55 -07002413
Qu Wenruo823ae5b2015-04-12 16:59:57 +08002414 /* Rfer update part */
2415 if (cur_old_count == 0 && cur_new_count > 0) {
2416 qg->rfer += num_bytes;
2417 qg->rfer_cmpr += num_bytes;
2418 dirty = true;
2419 }
2420 if (cur_old_count > 0 && cur_new_count == 0) {
2421 qg->rfer -= num_bytes;
2422 qg->rfer_cmpr -= num_bytes;
2423 dirty = true;
2424 }
2425
2426 /* Excl update part */
2427 /* Exclusive/none -> shared case */
2428 if (cur_old_count == nr_old_roots &&
2429 cur_new_count < nr_new_roots) {
2430 /* Exclusive -> shared */
2431 if (cur_old_count != 0) {
2432 qg->excl -= num_bytes;
2433 qg->excl_cmpr -= num_bytes;
2434 dirty = true;
2435 }
2436 }
2437
2438 /* Shared -> exclusive/none case */
2439 if (cur_old_count < nr_old_roots &&
2440 cur_new_count == nr_new_roots) {
2441 /* Shared->exclusive */
2442 if (cur_new_count != 0) {
2443 qg->excl += num_bytes;
2444 qg->excl_cmpr += num_bytes;
2445 dirty = true;
2446 }
2447 }
2448
2449 /* Exclusive/none -> exclusive/none case */
2450 if (cur_old_count == nr_old_roots &&
2451 cur_new_count == nr_new_roots) {
2452 if (cur_old_count == 0) {
2453 /* None -> exclusive/none */
2454
2455 if (cur_new_count != 0) {
2456 /* None -> exclusive */
2457 qg->excl += num_bytes;
2458 qg->excl_cmpr += num_bytes;
2459 dirty = true;
2460 }
2461 /* None -> none, nothing changed */
2462 } else {
2463 /* Exclusive -> exclusive/none */
2464
2465 if (cur_new_count == 0) {
2466 /* Exclusive -> none */
2467 qg->excl -= num_bytes;
2468 qg->excl_cmpr -= num_bytes;
2469 dirty = true;
2470 }
2471 /* Exclusive -> exclusive, nothing changed */
2472 }
2473 }
Qu Wenruoc05f9422015-08-03 14:44:29 +08002474
Qu Wenruo823ae5b2015-04-12 16:59:57 +08002475 if (dirty)
2476 qgroup_dirty(fs_info, qg);
2477 }
2478 return 0;
2479}
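
/*
 * Worked example (added for exposition, hypothetical numbers): a 16 KiB
 * extent owned only by 0/257 becomes shared with snapshot 0/258, so
 * nr_old_roots = 1 and nr_new_roots = 2. For 0/257, cur_old_count ==
 * cur_new_count == 1 hits the "-" cell (!A, B): excl drops by 16 KiB while
 * rfer is untouched. For 0/258, cur_old_count == 0 and cur_new_count == 1
 * hit the "*" cell (A, B): excl is unchanged, and the rfer part adds
 * 16 KiB since the count went from 0 to nonzero.
 */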
2480
Qu Wenruo5edfd9f2017-02-27 15:10:34 +08002481/*
2482 * Check if @roots is potentially a list of fs tree roots
2483 *
2484 * Return 0 for definitely not a fs/subvol tree roots ulist
2485 * Return 1 for possible fs/subvol tree roots in the list (considering an empty
2486 * one as well)
2487 */
2488static int maybe_fs_roots(struct ulist *roots)
2489{
2490 struct ulist_node *unode;
2491 struct ulist_iterator uiter;
2492
2493 /* Empty one, still possible for fs roots */
2494 if (!roots || roots->nnodes == 0)
2495 return 1;
2496
2497 ULIST_ITER_INIT(&uiter);
2498 unode = ulist_next(roots, &uiter);
2499 if (!unode)
2500 return 1;
2501
2502 /*
2503 * If it contains fs tree roots, then it must belong to fs/subvol
2504 * trees.
2505 * If it contains a non-fs tree, it won't be shared with fs/subvol trees.
2506 */
2507 return is_fstree(unode->val);
2508}
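
/*
 * Illustrative examples (added for exposition, hypothetical root ids): a
 * ulist of {5 (BTRFS_FS_TREE_OBJECTID), 257} returns 1, while
 * {2 (BTRFS_EXTENT_TREE_OBJECTID)} returns 0. Checking only the first node
 * is enough because an extent is never shared between fs/subvol trees and
 * other trees, so the whole list falls into one category or the other.
 */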
2509
Lu Fengqi8696d762018-07-18 14:45:39 +08002510int btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, u64 bytenr,
2511 u64 num_bytes, struct ulist *old_roots,
2512 struct ulist *new_roots)
Qu Wenruo550d7a22015-04-16 15:37:33 +08002513{
Lu Fengqi8696d762018-07-18 14:45:39 +08002514 struct btrfs_fs_info *fs_info = trans->fs_info;
Qu Wenruo550d7a22015-04-16 15:37:33 +08002515 struct ulist *qgroups = NULL;
2516 struct ulist *tmp = NULL;
2517 u64 seq;
2518 u64 nr_new_roots = 0;
2519 u64 nr_old_roots = 0;
2520 int ret = 0;
2521
Johannes Thumshirn26ef8492020-01-08 21:07:32 +09002522 /*
2523	 * If quotas get disabled meanwhile, the resources need to be freed and
2524 * we can't just exit here.
2525 */
David Sterba81353d52017-02-13 14:05:24 +01002526 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
Johannes Thumshirn26ef8492020-01-08 21:07:32 +09002527 goto out_free;
David Sterba81353d52017-02-13 14:05:24 +01002528
Qu Wenruo5edfd9f2017-02-27 15:10:34 +08002529 if (new_roots) {
2530 if (!maybe_fs_roots(new_roots))
2531 goto out_free;
Qu Wenruo550d7a22015-04-16 15:37:33 +08002532 nr_new_roots = new_roots->nnodes;
Qu Wenruo5edfd9f2017-02-27 15:10:34 +08002533 }
2534 if (old_roots) {
2535 if (!maybe_fs_roots(old_roots))
2536 goto out_free;
Qu Wenruo550d7a22015-04-16 15:37:33 +08002537 nr_old_roots = old_roots->nnodes;
Qu Wenruo5edfd9f2017-02-27 15:10:34 +08002538 }
2539
2540 /* Quick exit, either not fs tree roots, or won't affect any qgroup */
2541 if (nr_old_roots == 0 && nr_new_roots == 0)
2542 goto out_free;
Qu Wenruo550d7a22015-04-16 15:37:33 +08002543
Qu Wenruo550d7a22015-04-16 15:37:33 +08002544 BUG_ON(!fs_info->quota_root);
2545
Qu Wenruoc9f6f3c2018-05-03 09:59:02 +08002546 trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr,
2547 num_bytes, nr_old_roots, nr_new_roots);
Mark Fasheh0f5dcf82016-03-29 17:19:55 -07002548
Qu Wenruo550d7a22015-04-16 15:37:33 +08002549 qgroups = ulist_alloc(GFP_NOFS);
2550 if (!qgroups) {
2551 ret = -ENOMEM;
2552 goto out_free;
2553 }
2554 tmp = ulist_alloc(GFP_NOFS);
2555 if (!tmp) {
2556 ret = -ENOMEM;
2557 goto out_free;
2558 }
2559
2560 mutex_lock(&fs_info->qgroup_rescan_lock);
2561 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
2562 if (fs_info->qgroup_rescan_progress.objectid <= bytenr) {
2563 mutex_unlock(&fs_info->qgroup_rescan_lock);
2564 ret = 0;
2565 goto out_free;
2566 }
2567 }
2568 mutex_unlock(&fs_info->qgroup_rescan_lock);
2569
2570 spin_lock(&fs_info->qgroup_lock);
2571 seq = fs_info->qgroup_seq;
2572
2573 /* Update old refcnts using old_roots */
2574 ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq,
2575 UPDATE_OLD);
2576 if (ret < 0)
2577 goto out;
2578
2579 /* Update new refcnts using new_roots */
2580 ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq,
2581 UPDATE_NEW);
2582 if (ret < 0)
2583 goto out;
2584
2585 qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots,
2586 num_bytes, seq);
2587
2588 /*
2589 * Bump qgroup_seq to avoid seq overlap
2590 */
2591 fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1;
2592out:
2593 spin_unlock(&fs_info->qgroup_lock);
2594out_free:
2595 ulist_free(tmp);
2596 ulist_free(qgroups);
2597 ulist_free(old_roots);
2598 ulist_free(new_roots);
2599 return ret;
2600}
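
/*
 * Worked example (added for exposition, hypothetical numbers): with
 * fs_info->qgroup_seq = 100, accounting an extent with nr_old_roots = 2
 * and nr_new_roots = 3 stores per-qgroup refcnts with values up to
 * 100 + 3 = 103. Bumping qgroup_seq by max(2, 3) + 1 = 4 moves the next
 * pass to seq 104, so every refcnt written in this pass reads back as 0.
 */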
2601
Nikolay Borisov460fb202018-03-15 16:00:25 +02002602int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
Qu Wenruo550d7a22015-04-16 15:37:33 +08002603{
Nikolay Borisov460fb202018-03-15 16:00:25 +02002604 struct btrfs_fs_info *fs_info = trans->fs_info;
Qu Wenruo550d7a22015-04-16 15:37:33 +08002605 struct btrfs_qgroup_extent_record *record;
2606 struct btrfs_delayed_ref_root *delayed_refs;
2607 struct ulist *new_roots = NULL;
2608 struct rb_node *node;
Qu Wenruoc337e7b2018-09-27 14:42:29 +08002609 u64 num_dirty_extents = 0;
Qu Wenruo9086db82015-04-20 09:53:50 +08002610 u64 qgroup_to_skip;
Qu Wenruo550d7a22015-04-16 15:37:33 +08002611 int ret = 0;
2612
2613 delayed_refs = &trans->transaction->delayed_refs;
Qu Wenruo9086db82015-04-20 09:53:50 +08002614 qgroup_to_skip = delayed_refs->qgroup_to_skip;
Qu Wenruo550d7a22015-04-16 15:37:33 +08002615 while ((node = rb_first(&delayed_refs->dirty_extent_root))) {
2616 record = rb_entry(node, struct btrfs_qgroup_extent_record,
2617 node);
2618
Qu Wenruoc337e7b2018-09-27 14:42:29 +08002619 num_dirty_extents++;
Jeff Mahoneybc074522016-06-09 17:27:55 -04002620 trace_btrfs_qgroup_account_extents(fs_info, record);
Mark Fasheh0f5dcf82016-03-29 17:19:55 -07002621
Qu Wenruo550d7a22015-04-16 15:37:33 +08002622 if (!ret) {
2623 /*
Qu Wenruod1b8b942017-02-27 15:10:35 +08002624			 * Old roots should have been searched when inserting the
2625			 * qgroup extent record
2626 */
2627 if (WARN_ON(!record->old_roots)) {
2628 /* Search commit root to find old_roots */
2629 ret = btrfs_find_all_roots(NULL, fs_info,
2630 record->bytenr, 0,
Zygo Blaxellc995ab32017-09-22 13:58:45 -04002631 &record->old_roots, false);
Qu Wenruod1b8b942017-02-27 15:10:35 +08002632 if (ret < 0)
2633 goto cleanup;
2634 }
2635
Qu Wenruo1418bae2019-01-23 15:15:12 +08002636 /* Free the reserved data space */
2637 btrfs_qgroup_free_refroot(fs_info,
2638 record->data_rsv_refroot,
2639 record->data_rsv,
2640 BTRFS_QGROUP_RSV_DATA);
Qu Wenruod1b8b942017-02-27 15:10:35 +08002641 /*
Edmund Nadolskide47c9d2017-03-16 10:04:34 -06002642			 * Use SEQ_LAST as time_seq to do a special search which
Qu Wenruo550d7a22015-04-16 15:37:33 +08002643			 * doesn't lock the tree or delayed_refs and searches the current
2644			 * root. It's safe inside commit_transaction().
2645 */
2646 ret = btrfs_find_all_roots(trans, fs_info,
Zygo Blaxellc995ab32017-09-22 13:58:45 -04002647 record->bytenr, SEQ_LAST, &new_roots, false);
Qu Wenruo550d7a22015-04-16 15:37:33 +08002648 if (ret < 0)
2649 goto cleanup;
Qu Wenruod1b8b942017-02-27 15:10:35 +08002650 if (qgroup_to_skip) {
Qu Wenruo9086db82015-04-20 09:53:50 +08002651 ulist_del(new_roots, qgroup_to_skip, 0);
Qu Wenruod1b8b942017-02-27 15:10:35 +08002652 ulist_del(record->old_roots, qgroup_to_skip,
2653 0);
2654 }
Lu Fengqi8696d762018-07-18 14:45:39 +08002655 ret = btrfs_qgroup_account_extent(trans, record->bytenr,
2656 record->num_bytes,
2657 record->old_roots,
2658 new_roots);
Qu Wenruo550d7a22015-04-16 15:37:33 +08002659 record->old_roots = NULL;
2660 new_roots = NULL;
2661 }
2662cleanup:
2663 ulist_free(record->old_roots);
2664 ulist_free(new_roots);
2665 new_roots = NULL;
2666 rb_erase(node, &delayed_refs->dirty_extent_root);
2667 kfree(record);
2668
2669 }
Qu Wenruoc337e7b2018-09-27 14:42:29 +08002670 trace_qgroup_num_dirty_extents(fs_info, trans->transid,
2671 num_dirty_extents);
Qu Wenruo550d7a22015-04-16 15:37:33 +08002672 return ret;
2673}
2674
Josef Bacikfcebe452014-05-13 17:30:47 -07002675/*
Arne Jansenbed92ea2012-06-28 18:03:02 +02002676 * called from commit_transaction. Writes all changed qgroups to disk.
2677 */
Lu Fengqi280f8bd2018-07-18 14:45:40 +08002678int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
Arne Jansenbed92ea2012-06-28 18:03:02 +02002679{
Lu Fengqi280f8bd2018-07-18 14:45:40 +08002680 struct btrfs_fs_info *fs_info = trans->fs_info;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002681 int ret = 0;
2682
Marcos Paulo de Souzae3b0edd2019-11-25 21:58:50 -03002683 if (!fs_info->quota_root)
Nikolay Borisov5d235152018-01-31 10:52:04 +02002684 return ret;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002685
2686 spin_lock(&fs_info->qgroup_lock);
2687 while (!list_empty(&fs_info->dirty_qgroups)) {
2688 struct btrfs_qgroup *qgroup;
2689 qgroup = list_first_entry(&fs_info->dirty_qgroups,
2690 struct btrfs_qgroup, dirty);
2691 list_del_init(&qgroup->dirty);
2692 spin_unlock(&fs_info->qgroup_lock);
Lu Fengqi3e07e9a2018-07-18 14:45:28 +08002693 ret = update_qgroup_info_item(trans, qgroup);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002694 if (ret)
2695 fs_info->qgroup_flags |=
2696 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
Lu Fengqiac8a8662018-07-18 14:45:27 +08002697 ret = update_qgroup_limit_item(trans, qgroup);
Dongsheng Yangd3001ed2014-11-20 21:04:56 -05002698 if (ret)
2699 fs_info->qgroup_flags |=
2700 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002701 spin_lock(&fs_info->qgroup_lock);
2702 }
Josef Bacikafcdd122016-09-02 15:40:02 -04002703 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
Arne Jansenbed92ea2012-06-28 18:03:02 +02002704 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON;
2705 else
2706 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
2707 spin_unlock(&fs_info->qgroup_lock);
2708
Lu Fengqi2e980ac2018-07-18 14:45:29 +08002709 ret = update_qgroup_status_item(trans);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002710 if (ret)
2711 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
2712
Arne Jansenbed92ea2012-06-28 18:03:02 +02002713 return ret;
2714}
2715
2716/*
Nicholas D Steeves01327612016-05-19 21:18:45 -04002717 * Copy the accounting information between qgroups. This is necessary
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002718 * when a snapshot or a subvolume is created. Throwing an error will
2719 * cause a transaction abort so we take extra care here to only error
2720 * when a readonly fs is a reasonable outcome.
Arne Jansenbed92ea2012-06-28 18:03:02 +02002721 */
Lu Fengqia93774222018-07-18 14:45:41 +08002722int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
2723 u64 objectid, struct btrfs_qgroup_inherit *inherit)
Arne Jansenbed92ea2012-06-28 18:03:02 +02002724{
2725 int ret = 0;
2726 int i;
2727 u64 *i_qgroups;
Qu Wenruoe88439d2019-06-13 17:31:24 +08002728 bool committing = false;
Lu Fengqia93774222018-07-18 14:45:41 +08002729 struct btrfs_fs_info *fs_info = trans->fs_info;
Filipe Manana552f0322018-11-19 16:20:34 +00002730 struct btrfs_root *quota_root;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002731 struct btrfs_qgroup *srcgroup;
2732 struct btrfs_qgroup *dstgroup;
Qu Wenruocbab8ad2020-04-02 14:37:35 +08002733 bool need_rescan = false;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002734 u32 level_size = 0;
Wang Shilong3f5e2d32013-04-07 10:50:19 +00002735 u64 nums;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002736
Qu Wenruoe88439d2019-06-13 17:31:24 +08002737 /*
2738 * There are only two callers of this function.
2739 *
2740 * One in create_subvol() in the ioctl context, which needs to hold
2741 * the qgroup_ioctl_lock.
2742 *
2743	 * The other one is in create_pending_snapshot(), where no other qgroup
2744	 * code can modify the fs, as all callers need to either start a new trans
2745	 * or hold a trans handle, thus we don't need to hold
2746	 * qgroup_ioctl_lock.
2747	 * This avoids a long and complex lock chain and makes lockdep happy.
2748 */
2749 spin_lock(&fs_info->trans_lock);
2750 if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
2751 committing = true;
2752 spin_unlock(&fs_info->trans_lock);
2753
2754 if (!committing)
2755 mutex_lock(&fs_info->qgroup_ioctl_lock);
Josef Bacikafcdd122016-09-02 15:40:02 -04002756 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
Wang Shilongf2f6ed32013-04-07 10:50:16 +00002757 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002758
Filipe Manana552f0322018-11-19 16:20:34 +00002759 quota_root = fs_info->quota_root;
Wang Shilongf2f6ed32013-04-07 10:50:16 +00002760 if (!quota_root) {
2761 ret = -EINVAL;
2762 goto out;
2763 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02002764
Wang Shilong3f5e2d32013-04-07 10:50:19 +00002765 if (inherit) {
2766 i_qgroups = (u64 *)(inherit + 1);
2767 nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
2768 2 * inherit->num_excl_copies;
2769 for (i = 0; i < nums; ++i) {
2770 srcgroup = find_qgroup_rb(fs_info, *i_qgroups);
Dongsheng Yang09870d22014-11-11 07:18:22 -05002771
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002772 /*
2773 * Zero out invalid groups so we can ignore
2774 * them later.
2775 */
2776 if (!srcgroup ||
2777 ((srcgroup->qgroupid >> 48) <= (objectid >> 48)))
2778 *i_qgroups = 0ULL;
2779
Wang Shilong3f5e2d32013-04-07 10:50:19 +00002780 ++i_qgroups;
2781 }
2782 }
2783
Arne Jansenbed92ea2012-06-28 18:03:02 +02002784 /*
2785 * create a tracking group for the subvol itself
2786 */
2787 ret = add_qgroup_item(trans, quota_root, objectid);
2788 if (ret)
2789 goto out;
2790
Arne Jansenbed92ea2012-06-28 18:03:02 +02002791 /*
2792 * add qgroup to all inherited groups
2793 */
2794 if (inherit) {
2795 i_qgroups = (u64 *)(inherit + 1);
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002796 for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) {
2797 if (*i_qgroups == 0)
2798 continue;
Lu Fengqi711169c2018-07-18 14:45:24 +08002799 ret = add_qgroup_relation_item(trans, objectid,
2800 *i_qgroups);
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002801 if (ret && ret != -EEXIST)
Arne Jansenbed92ea2012-06-28 18:03:02 +02002802 goto out;
Lu Fengqi711169c2018-07-18 14:45:24 +08002803 ret = add_qgroup_relation_item(trans, *i_qgroups,
2804 objectid);
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002805 if (ret && ret != -EEXIST)
Arne Jansenbed92ea2012-06-28 18:03:02 +02002806 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002807 }
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002808 ret = 0;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002809 }
2810
2811
2812 spin_lock(&fs_info->qgroup_lock);
2813
2814 dstgroup = add_qgroup_rb(fs_info, objectid);
Dan Carpenter57a5a882012-07-30 02:15:43 -06002815 if (IS_ERR(dstgroup)) {
2816 ret = PTR_ERR(dstgroup);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002817 goto unlock;
Dan Carpenter57a5a882012-07-30 02:15:43 -06002818 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02002819
Dongsheng Yange8c85412014-11-20 20:58:34 -05002820 if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) {
Dongsheng Yange8c85412014-11-20 20:58:34 -05002821 dstgroup->lim_flags = inherit->lim.flags;
2822 dstgroup->max_rfer = inherit->lim.max_rfer;
2823 dstgroup->max_excl = inherit->lim.max_excl;
2824 dstgroup->rsv_rfer = inherit->lim.rsv_rfer;
2825 dstgroup->rsv_excl = inherit->lim.rsv_excl;
Dongsheng Yang1510e712014-11-20 21:01:41 -05002826
Lu Fengqiac8a8662018-07-18 14:45:27 +08002827 ret = update_qgroup_limit_item(trans, dstgroup);
Dongsheng Yang1510e712014-11-20 21:01:41 -05002828 if (ret) {
2829 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
Jeff Mahoney5d163e02016-09-20 10:05:00 -04002830 btrfs_info(fs_info,
2831 "unable to update quota limit for %llu",
2832 dstgroup->qgroupid);
Dongsheng Yang1510e712014-11-20 21:01:41 -05002833 goto unlock;
2834 }
Dongsheng Yange8c85412014-11-20 20:58:34 -05002835 }
2836
Arne Jansenbed92ea2012-06-28 18:03:02 +02002837 if (srcid) {
2838 srcgroup = find_qgroup_rb(fs_info, srcid);
Chris Masonf3a87f12012-09-14 20:06:30 -04002839 if (!srcgroup)
Arne Jansenbed92ea2012-06-28 18:03:02 +02002840 goto unlock;
Josef Bacikfcebe452014-05-13 17:30:47 -07002841
2842 /*
2843 * We call inherit after we clone the root in order to make sure
2844 * our counts don't go crazy, so at this point the only
2845 * difference between the two roots should be the root node.
2846 */
Lu Fengqic8389d42018-07-17 16:58:22 +08002847 level_size = fs_info->nodesize;
Josef Bacikfcebe452014-05-13 17:30:47 -07002848 dstgroup->rfer = srcgroup->rfer;
2849 dstgroup->rfer_cmpr = srcgroup->rfer_cmpr;
2850 dstgroup->excl = level_size;
2851 dstgroup->excl_cmpr = level_size;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002852 srcgroup->excl = level_size;
2853 srcgroup->excl_cmpr = level_size;
Dongsheng Yang3eeb4d52014-11-20 20:14:38 -05002854
2855 /* inherit the limit info */
2856 dstgroup->lim_flags = srcgroup->lim_flags;
2857 dstgroup->max_rfer = srcgroup->max_rfer;
2858 dstgroup->max_excl = srcgroup->max_excl;
2859 dstgroup->rsv_rfer = srcgroup->rsv_rfer;
2860 dstgroup->rsv_excl = srcgroup->rsv_excl;
2861
Arne Jansenbed92ea2012-06-28 18:03:02 +02002862 qgroup_dirty(fs_info, dstgroup);
2863 qgroup_dirty(fs_info, srcgroup);
2864 }
2865
Chris Masonf3a87f12012-09-14 20:06:30 -04002866 if (!inherit)
Arne Jansenbed92ea2012-06-28 18:03:02 +02002867 goto unlock;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002868
2869 i_qgroups = (u64 *)(inherit + 1);
2870 for (i = 0; i < inherit->num_qgroups; ++i) {
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002871 if (*i_qgroups) {
Jeff Mahoney0b246af2016-06-22 18:54:23 -04002872 ret = add_relation_rb(fs_info, objectid, *i_qgroups);
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002873 if (ret)
2874 goto unlock;
2875 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02002876 ++i_qgroups;
Qu Wenruocbab8ad2020-04-02 14:37:35 +08002877
2878 /*
2879 * If we're doing a snapshot, and adding the snapshot to a new
2880 * qgroup, the numbers are guaranteed to be incorrect.
2881 */
2882 if (srcid)
2883 need_rescan = true;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002884 }
2885
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002886 for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02002887 struct btrfs_qgroup *src;
2888 struct btrfs_qgroup *dst;
2889
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002890 if (!i_qgroups[0] || !i_qgroups[1])
2891 continue;
2892
Arne Jansenbed92ea2012-06-28 18:03:02 +02002893 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2894 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2895
2896 if (!src || !dst) {
2897 ret = -EINVAL;
2898 goto unlock;
2899 }
2900
2901 dst->rfer = src->rfer - level_size;
2902 dst->rfer_cmpr = src->rfer_cmpr - level_size;
Qu Wenruocbab8ad2020-04-02 14:37:35 +08002903
2904 /* Manually tweaking numbers certainly needs a rescan */
2905 need_rescan = true;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002906 }
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002907 for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02002908 struct btrfs_qgroup *src;
2909 struct btrfs_qgroup *dst;
2910
Mark Fasheh918c2ee2016-03-30 17:57:48 -07002911 if (!i_qgroups[0] || !i_qgroups[1])
2912 continue;
2913
Arne Jansenbed92ea2012-06-28 18:03:02 +02002914 src = find_qgroup_rb(fs_info, i_qgroups[0]);
2915 dst = find_qgroup_rb(fs_info, i_qgroups[1]);
2916
2917 if (!src || !dst) {
2918 ret = -EINVAL;
2919 goto unlock;
2920 }
2921
2922 dst->excl = src->excl + level_size;
2923 dst->excl_cmpr = src->excl_cmpr + level_size;
Qu Wenruocbab8ad2020-04-02 14:37:35 +08002924 need_rescan = true;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002925 }
2926
2927unlock:
2928 spin_unlock(&fs_info->qgroup_lock);
Qu Wenruo49e5fb42020-06-28 13:07:15 +08002929 if (!ret)
2930 ret = btrfs_sysfs_add_one_qgroup(fs_info, dstgroup);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002931out:
Qu Wenruoe88439d2019-06-13 17:31:24 +08002932 if (!committing)
2933 mutex_unlock(&fs_info->qgroup_ioctl_lock);
Qu Wenruocbab8ad2020-04-02 14:37:35 +08002934 if (need_rescan)
2935 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002936 return ret;
2937}
2938
Qu Wenruoadca4d92020-07-13 18:50:49 +08002939static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes)
Jeff Mahoney003d7c52017-01-25 09:50:33 -05002940{
2941 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
Qu Wenruodba21322017-12-12 15:34:25 +08002942 qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer)
Jeff Mahoney003d7c52017-01-25 09:50:33 -05002943 return false;
2944
2945 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) &&
Qu Wenruodba21322017-12-12 15:34:25 +08002946 qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl)
Jeff Mahoney003d7c52017-01-25 09:50:33 -05002947 return false;
2948
2949 return true;
2950}
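
/*
 * Worked example (made-up numbers, not taken from any call site): with
 * max_rfer = 1 MiB, rfer = 768 KiB and a reserved total of 128 KiB,
 * qgroup_check_limits() rejects a further 256 KiB reservation
 * (128K + 768K + 256K > 1M) but accepts a 64 KiB one
 * (128K + 768K + 64K <= 1M). The same arithmetic applies to the
 * exclusive limit.
 */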
2951
Qu Wenruodba21322017-12-12 15:34:25 +08002952static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce,
2953 enum btrfs_qgroup_rsv_type type)
Arne Jansenbed92ea2012-06-28 18:03:02 +02002954{
Arne Jansenbed92ea2012-06-28 18:03:02 +02002955 struct btrfs_qgroup *qgroup;
2956 struct btrfs_fs_info *fs_info = root->fs_info;
2957 u64 ref_root = root->root_key.objectid;
2958 int ret = 0;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002959 struct ulist_node *unode;
2960 struct ulist_iterator uiter;
2961
2962 if (!is_fstree(ref_root))
2963 return 0;
2964
2965 if (num_bytes == 0)
2966 return 0;
Sargun Dhillonf29efe22017-05-11 21:17:33 +00002967
2968 if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) &&
2969 capable(CAP_SYS_RESOURCE))
2970 enforce = false;
2971
Arne Jansenbed92ea2012-06-28 18:03:02 +02002972 spin_lock(&fs_info->qgroup_lock);
Marcos Paulo de Souzae3b0edd2019-11-25 21:58:50 -03002973 if (!fs_info->quota_root)
Arne Jansenbed92ea2012-06-28 18:03:02 +02002974 goto out;
2975
2976 qgroup = find_qgroup_rb(fs_info, ref_root);
2977 if (!qgroup)
2978 goto out;
2979
2980 /*
2981	 * In the first step, we check all affected qgroups to see whether any
2982	 * limits would be exceeded
2983 */
Wang Shilong1e8f9152013-05-06 11:03:27 +00002984 ulist_reinit(fs_info->qgroup_ulist);
2985 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
David Sterbaa1840b52018-03-27 19:04:50 +02002986 qgroup_to_aux(qgroup), GFP_ATOMIC);
Wang Shilong3c971852013-04-17 14:00:36 +00002987 if (ret < 0)
2988 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02002989 ULIST_ITER_INIT(&uiter);
Wang Shilong1e8f9152013-05-06 11:03:27 +00002990 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02002991 struct btrfs_qgroup *qg;
2992 struct btrfs_qgroup_list *glist;
2993
David Sterbaef2fff62016-10-26 16:23:50 +02002994 qg = unode_aux_to_qgroup(unode);
Arne Jansenbed92ea2012-06-28 18:03:02 +02002995
Qu Wenruoadca4d92020-07-13 18:50:49 +08002996 if (enforce && !qgroup_check_limits(qg, num_bytes)) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02002997 ret = -EDQUOT;
Wang Shilong720f1e22013-03-06 11:51:47 +00002998 goto out;
2999 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02003000
3001 list_for_each_entry(glist, &qg->groups, next_group) {
Wang Shilong1e8f9152013-05-06 11:03:27 +00003002 ret = ulist_add(fs_info->qgroup_ulist,
3003 glist->group->qgroupid,
David Sterbaa1840b52018-03-27 19:04:50 +02003004 qgroup_to_aux(glist->group), GFP_ATOMIC);
Wang Shilong3c971852013-04-17 14:00:36 +00003005 if (ret < 0)
3006 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02003007 }
3008 }
Wang Shilong3c971852013-04-17 14:00:36 +00003009 ret = 0;
Arne Jansenbed92ea2012-06-28 18:03:02 +02003010 /*
3011	 * No limits exceeded; now record the reservation into all qgroups
3012 */
3013 ULIST_ITER_INIT(&uiter);
Wang Shilong1e8f9152013-05-06 11:03:27 +00003014 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02003015 struct btrfs_qgroup *qg;
3016
David Sterbaef2fff62016-10-26 16:23:50 +02003017 qg = unode_aux_to_qgroup(unode);
Arne Jansenbed92ea2012-06-28 18:03:02 +02003018
Qu Wenruo64ee4e72017-12-12 15:34:27 +08003019 qgroup_rsv_add(fs_info, qg, num_bytes, type);
Arne Jansenbed92ea2012-06-28 18:03:02 +02003020 }
3021
3022out:
3023 spin_unlock(&fs_info->qgroup_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02003024 return ret;
3025}
3026
Qu Wenruoe1211d02017-12-12 15:34:30 +08003027/*
3028 * Free @num_bytes of reserved space with @type for qgroup. (Normally level 0
3029 * qgroup).
3030 *
3031 * Will handle all higher level qgroups too.
3032 *
3033 * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup.
3034 * This special case is only used for META_PERTRANS type.
3035 */
Qu Wenruo297d7502015-09-08 17:08:37 +08003036void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
Qu Wenruod4e5c922017-12-12 15:34:23 +08003037 u64 ref_root, u64 num_bytes,
3038 enum btrfs_qgroup_rsv_type type)
Arne Jansenbed92ea2012-06-28 18:03:02 +02003039{
Arne Jansenbed92ea2012-06-28 18:03:02 +02003040 struct btrfs_qgroup *qgroup;
Arne Jansenbed92ea2012-06-28 18:03:02 +02003041 struct ulist_node *unode;
3042 struct ulist_iterator uiter;
Wang Shilong3c971852013-04-17 14:00:36 +00003043 int ret = 0;
Arne Jansenbed92ea2012-06-28 18:03:02 +02003044
3045 if (!is_fstree(ref_root))
3046 return;
3047
3048 if (num_bytes == 0)
3049 return;
3050
Qu Wenruoe1211d02017-12-12 15:34:30 +08003051 if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) {
3052 WARN(1, "%s: Invalid type to free", __func__);
3053 return;
3054 }
Arne Jansenbed92ea2012-06-28 18:03:02 +02003055 spin_lock(&fs_info->qgroup_lock);
3056
Marcos Paulo de Souzae3b0edd2019-11-25 21:58:50 -03003057 if (!fs_info->quota_root)
Arne Jansenbed92ea2012-06-28 18:03:02 +02003058 goto out;
3059
3060 qgroup = find_qgroup_rb(fs_info, ref_root);
3061 if (!qgroup)
3062 goto out;
3063
Qu Wenruoe1211d02017-12-12 15:34:30 +08003064 if (num_bytes == (u64)-1)
Qu Wenruo82874752017-12-12 15:34:34 +08003065 /*
3066 * We're freeing all pertrans rsv, get reserved value from
3067 * level 0 qgroup as real num_bytes to free.
3068 */
Qu Wenruoe1211d02017-12-12 15:34:30 +08003069 num_bytes = qgroup->rsv.values[type];
3070
Wang Shilong1e8f9152013-05-06 11:03:27 +00003071 ulist_reinit(fs_info->qgroup_ulist);
3072 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
David Sterbaa1840b52018-03-27 19:04:50 +02003073 qgroup_to_aux(qgroup), GFP_ATOMIC);
Wang Shilong3c971852013-04-17 14:00:36 +00003074 if (ret < 0)
3075 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02003076 ULIST_ITER_INIT(&uiter);
Wang Shilong1e8f9152013-05-06 11:03:27 +00003077 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
Arne Jansenbed92ea2012-06-28 18:03:02 +02003078 struct btrfs_qgroup *qg;
3079 struct btrfs_qgroup_list *glist;
3080
David Sterbaef2fff62016-10-26 16:23:50 +02003081 qg = unode_aux_to_qgroup(unode);
Arne Jansenbed92ea2012-06-28 18:03:02 +02003082
Qu Wenruo64ee4e72017-12-12 15:34:27 +08003083 qgroup_rsv_release(fs_info, qg, num_bytes, type);
Arne Jansenbed92ea2012-06-28 18:03:02 +02003084
3085 list_for_each_entry(glist, &qg->groups, next_group) {
Wang Shilong1e8f9152013-05-06 11:03:27 +00003086 ret = ulist_add(fs_info->qgroup_ulist,
3087 glist->group->qgroupid,
David Sterbaa1840b52018-03-27 19:04:50 +02003088 qgroup_to_aux(glist->group), GFP_ATOMIC);
Wang Shilong3c971852013-04-17 14:00:36 +00003089 if (ret < 0)
3090 goto out;
Arne Jansenbed92ea2012-06-28 18:03:02 +02003091 }
3092 }
3093
3094out:
3095 spin_unlock(&fs_info->qgroup_lock);
Arne Jansenbed92ea2012-06-28 18:03:02 +02003096}
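
/*
 * Usage sketch (illustrative, not a call site in this function): at
 * transaction commit, freeing every pertrans reservation of a subvolume
 * could look like:
 *
 *	btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
 *				  (u64)-1, BTRFS_QGROUP_RSV_META_PERTRANS);
 *
 * Any other rsv type must pass an explicit byte count, as the (u64)-1
 * special case is rejected for them above.
 */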
3097
Jan Schmidt2f232032013-04-25 16:04:51 +00003098/*
Qu Wenruoff3d27a02018-05-14 09:38:13 +08003099 * Check if the leaf is the last leaf, which means all node pointers
3100 * are at their last position.
3101 */
3102static bool is_last_leaf(struct btrfs_path *path)
3103{
3104 int i;
3105
3106 for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
3107 if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
3108 return false;
3109 }
3110 return true;
3111}
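
/*
 * Example (illustrative): in a two-level tree whose root node holds three
 * pointers, the leaf reached through root slot 2 (the last slot) makes
 * is_last_leaf() return true; a leaf reached through slot 0 or 1 does not.
 */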
3112
3113/*
Jan Schmidt2f232032013-04-25 16:04:51 +00003114 * Returns < 0 on error, 0 when more leaves are to be scanned.
Qu Wenruo33931682015-02-27 16:24:24 +08003115 * Returns 1 when done.
Jan Schmidt2f232032013-04-25 16:04:51 +00003116 */
Lu Fengqi62088ca2018-07-18 14:45:42 +08003117static int qgroup_rescan_leaf(struct btrfs_trans_handle *trans,
3118 struct btrfs_path *path)
Jan Schmidt2f232032013-04-25 16:04:51 +00003119{
Lu Fengqi62088ca2018-07-18 14:45:42 +08003120 struct btrfs_fs_info *fs_info = trans->fs_info;
Jan Schmidt2f232032013-04-25 16:04:51 +00003121 struct btrfs_key found;
Qu Wenruo0a0e8b892015-10-26 09:19:43 +08003122 struct extent_buffer *scratch_leaf = NULL;
Jan Schmidt2f232032013-04-25 16:04:51 +00003123 struct ulist *roots = NULL;
Josef Bacikfcebe452014-05-13 17:30:47 -07003124 u64 num_bytes;
Qu Wenruoff3d27a02018-05-14 09:38:13 +08003125 bool done;
Jan Schmidt2f232032013-04-25 16:04:51 +00003126 int slot;
3127 int ret;
3128
Jan Schmidt2f232032013-04-25 16:04:51 +00003129 mutex_lock(&fs_info->qgroup_rescan_lock);
3130 ret = btrfs_search_slot_for_read(fs_info->extent_root,
3131 &fs_info->qgroup_rescan_progress,
3132 path, 1, 0);
3133
Jeff Mahoneyab8d0fc2016-09-20 10:05:02 -04003134 btrfs_debug(fs_info,
3135 "current progress key (%llu %u %llu), search_slot ret %d",
3136 fs_info->qgroup_rescan_progress.objectid,
3137 fs_info->qgroup_rescan_progress.type,
3138 fs_info->qgroup_rescan_progress.offset, ret);
Jan Schmidt2f232032013-04-25 16:04:51 +00003139
3140 if (ret) {
3141 /*
3142 * The rescan is about to end, we will not be scanning any
3143 * further blocks. We cannot unset the RESCAN flag here, because
3144 * we want to commit the transaction if everything went well.
3145 * To make the live accounting work in this phase, we set our
3146 * scan progress pointer such that every real extent objectid
3147 * will be smaller.
3148 */
3149 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3150 btrfs_release_path(path);
3151 mutex_unlock(&fs_info->qgroup_rescan_lock);
3152 return ret;
3153 }
Qu Wenruoff3d27a02018-05-14 09:38:13 +08003154 done = is_last_leaf(path);
Jan Schmidt2f232032013-04-25 16:04:51 +00003155
3156 btrfs_item_key_to_cpu(path->nodes[0], &found,
3157 btrfs_header_nritems(path->nodes[0]) - 1);
3158 fs_info->qgroup_rescan_progress.objectid = found.objectid + 1;
3159
Qu Wenruo0a0e8b892015-10-26 09:19:43 +08003160 scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]);
3161 if (!scratch_leaf) {
3162 ret = -ENOMEM;
3163 mutex_unlock(&fs_info->qgroup_rescan_lock);
3164 goto out;
3165 }
Jan Schmidt2f232032013-04-25 16:04:51 +00003166 slot = path->slots[0];
3167 btrfs_release_path(path);
3168 mutex_unlock(&fs_info->qgroup_rescan_lock);
3169
3170 for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) {
3171 btrfs_item_key_to_cpu(scratch_leaf, &found, slot);
Josef Bacik3a6d75e2014-01-23 16:45:10 -05003172 if (found.type != BTRFS_EXTENT_ITEM_KEY &&
3173 found.type != BTRFS_METADATA_ITEM_KEY)
Jan Schmidt2f232032013-04-25 16:04:51 +00003174 continue;
Josef Bacik3a6d75e2014-01-23 16:45:10 -05003175 if (found.type == BTRFS_METADATA_ITEM_KEY)
Jeff Mahoneyda170662016-06-15 09:22:56 -04003176 num_bytes = fs_info->nodesize;
Josef Bacik3a6d75e2014-01-23 16:45:10 -05003177 else
3178 num_bytes = found.offset;
3179
Josef Bacikfcebe452014-05-13 17:30:47 -07003180 ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0,
Zygo Blaxellc995ab32017-09-22 13:58:45 -04003181 &roots, false);
Jan Schmidt2f232032013-04-25 16:04:51 +00003182 if (ret < 0)
3183 goto out;
Qu Wenruo9d220c92015-04-13 11:02:16 +08003184 /* For rescan, just pass old_roots as NULL */
Lu Fengqi8696d762018-07-18 14:45:39 +08003185 ret = btrfs_qgroup_account_extent(trans, found.objectid,
3186 num_bytes, NULL, roots);
Qu Wenruo9d220c92015-04-13 11:02:16 +08003187 if (ret < 0)
Jan Schmidt2f232032013-04-25 16:04:51 +00003188 goto out;
Jan Schmidt2f232032013-04-25 16:04:51 +00003189 }
Jan Schmidt2f232032013-04-25 16:04:51 +00003190out:
Nikolay Borisovdf449712018-08-15 18:26:56 +03003191 if (scratch_leaf)
Qu Wenruo0a0e8b892015-10-26 09:19:43 +08003192 free_extent_buffer(scratch_leaf);
Jan Schmidt2f232032013-04-25 16:04:51 +00003193
Qu Wenruo6f7de192018-06-27 18:19:55 +08003194 if (done && !ret) {
Qu Wenruoff3d27a02018-05-14 09:38:13 +08003195 ret = 1;
Qu Wenruo6f7de192018-06-27 18:19:55 +08003196 fs_info->qgroup_rescan_progress.objectid = (u64)-1;
3197 }
Jan Schmidt2f232032013-04-25 16:04:51 +00003198 return ret;
3199}
3200
Filipe Mananacb13eea2020-12-14 10:10:45 +00003201static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
3202{
3203 return btrfs_fs_closing(fs_info) ||
3204 test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
3205}
3206
Qu Wenruod458b052014-02-28 10:46:19 +08003207static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
Jan Schmidt2f232032013-04-25 16:04:51 +00003208{
Jan Schmidtb382a322013-05-28 15:47:24 +00003209 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
3210 qgroup_rescan_work);
Jan Schmidt2f232032013-04-25 16:04:51 +00003211 struct btrfs_path *path;
3212 struct btrfs_trans_handle *trans = NULL;
Jan Schmidt2f232032013-04-25 16:04:51 +00003213 int err = -ENOMEM;
Qu Wenruo53b7cde2015-02-27 16:24:25 +08003214 int ret = 0;
Filipe Mananacb13eea2020-12-14 10:10:45 +00003215 bool stopped = false;
Jan Schmidt2f232032013-04-25 16:04:51 +00003216
3217 path = btrfs_alloc_path();
3218 if (!path)
3219 goto out;
Qu Wenruob6debf12018-05-14 09:38:12 +08003220 /*
3221	 * Rescan should only search the commit root, and any later difference
3222	 * will be recorded by the qgroup accounting code
3223 */
3224 path->search_commit_root = 1;
3225 path->skip_locking = 1;
Jan Schmidt2f232032013-04-25 16:04:51 +00003226
3227 err = 0;
Filipe Mananacb13eea2020-12-14 10:10:45 +00003228 while (!err && !(stopped = rescan_should_stop(fs_info))) {
Jan Schmidt2f232032013-04-25 16:04:51 +00003229 trans = btrfs_start_transaction(fs_info->fs_root, 0);
3230 if (IS_ERR(trans)) {
3231 err = PTR_ERR(trans);
3232 break;
3233 }
Josef Bacikafcdd122016-09-02 15:40:02 -04003234 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
Jan Schmidt2f232032013-04-25 16:04:51 +00003235 err = -EINTR;
3236 } else {
Lu Fengqi62088ca2018-07-18 14:45:42 +08003237 err = qgroup_rescan_leaf(trans, path);
Jan Schmidt2f232032013-04-25 16:04:51 +00003238 }
3239 if (err > 0)
Jeff Mahoney3a45bb22016-09-09 21:39:03 -04003240 btrfs_commit_transaction(trans);
Jan Schmidt2f232032013-04-25 16:04:51 +00003241 else
Jeff Mahoney3a45bb22016-09-09 21:39:03 -04003242 btrfs_end_transaction(trans);
Jan Schmidt2f232032013-04-25 16:04:51 +00003243 }
3244
3245out:
Jan Schmidt2f232032013-04-25 16:04:51 +00003246 btrfs_free_path(path);
Jan Schmidt2f232032013-04-25 16:04:51 +00003247
3248 mutex_lock(&fs_info->qgroup_rescan_lock);
Qu Wenruo33931682015-02-27 16:24:24 +08003249 if (err > 0 &&
Jan Schmidt2f232032013-04-25 16:04:51 +00003250 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
3251 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3252 } else if (err < 0) {
3253 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
3254 }
3255 mutex_unlock(&fs_info->qgroup_rescan_lock);
3256
Qu Wenruo53b7cde2015-02-27 16:24:25 +08003257 /*
Nicholas D Steeves01327612016-05-19 21:18:45 -04003258 * Only update the status item, since the previous part has already updated
Qu Wenruo53b7cde2015-02-27 16:24:25 +08003259 * the qgroup info.
3260 */
3261 trans = btrfs_start_transaction(fs_info->quota_root, 1);
3262 if (IS_ERR(trans)) {
3263 err = PTR_ERR(trans);
Filipe Manana13fc1d22019-09-24 10:49:54 +01003264 trans = NULL;
Qu Wenruo53b7cde2015-02-27 16:24:25 +08003265 btrfs_err(fs_info,
David Sterba913e1532017-07-13 15:32:18 +02003266 "failed to start transaction for status update: %d",
Qu Wenruo53b7cde2015-02-27 16:24:25 +08003267 err);
Qu Wenruo53b7cde2015-02-27 16:24:25 +08003268 }
Filipe Manana13fc1d22019-09-24 10:49:54 +01003269
3270 mutex_lock(&fs_info->qgroup_rescan_lock);
Filipe Mananacb13eea2020-12-14 10:10:45 +00003271 if (!stopped)
Filipe Manana13fc1d22019-09-24 10:49:54 +01003272 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3273 if (trans) {
3274 ret = update_qgroup_status_item(trans);
3275 if (ret < 0) {
3276 err = ret;
3277 btrfs_err(fs_info, "failed to update qgroup status: %d",
3278 err);
3279 }
Qu Wenruo53b7cde2015-02-27 16:24:25 +08003280 }
Filipe Manana13fc1d22019-09-24 10:49:54 +01003281 fs_info->qgroup_rescan_running = false;
3282 complete_all(&fs_info->qgroup_rescan_completion);
3283 mutex_unlock(&fs_info->qgroup_rescan_lock);
3284
3285 if (!trans)
3286 return;
3287
Jeff Mahoney3a45bb22016-09-09 21:39:03 -04003288 btrfs_end_transaction(trans);
Qu Wenruo53b7cde2015-02-27 16:24:25 +08003289
Filipe Mananacb13eea2020-12-14 10:10:45 +00003290 if (stopped) {
Justin Maggard7343dd62015-11-04 15:56:16 -08003291 btrfs_info(fs_info, "qgroup scan paused");
3292 } else if (err >= 0) {
Frank Holtonefe120a2013-12-20 11:37:06 -05003293 btrfs_info(fs_info, "qgroup scan completed%s",
Qu Wenruo33931682015-02-27 16:24:24 +08003294 err > 0 ? " (inconsistency flag cleared)" : "");
Jan Schmidt2f232032013-04-25 16:04:51 +00003295 } else {
Frank Holtonefe120a2013-12-20 11:37:06 -05003296 btrfs_err(fs_info, "qgroup scan failed with %d", err);
Jan Schmidt2f232032013-04-25 16:04:51 +00003297 }
3298}
3299
Jan Schmidtb382a322013-05-28 15:47:24 +00003300/*
3301 * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
3302 * memory required for the rescan context.
3303 */
3304static int
3305qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
3306 int init_flags)
Jan Schmidt2f232032013-04-25 16:04:51 +00003307{
3308 int ret = 0;
Jan Schmidt2f232032013-04-25 16:04:51 +00003309
Qu Wenruo9593bf492018-05-02 13:28:03 +08003310 if (!init_flags) {
3311 /* we're resuming qgroup rescan at mount time */
Filipe Mananae4e7ede2018-06-27 00:43:15 +01003312 if (!(fs_info->qgroup_flags &
3313 BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
Qu Wenruo9593bf492018-05-02 13:28:03 +08003314 btrfs_warn(fs_info,
Nikolay Borisov37d02592019-11-18 14:16:44 +02003315 "qgroup rescan init failed, qgroup rescan is not queued");
Filipe Mananae4e7ede2018-06-27 00:43:15 +01003316 ret = -EINVAL;
3317 } else if (!(fs_info->qgroup_flags &
3318 BTRFS_QGROUP_STATUS_FLAG_ON)) {
Qu Wenruo9593bf492018-05-02 13:28:03 +08003319 btrfs_warn(fs_info,
Nikolay Borisov37d02592019-11-18 14:16:44 +02003320 "qgroup rescan init failed, qgroup is not enabled");
Filipe Mananae4e7ede2018-06-27 00:43:15 +01003321 ret = -EINVAL;
3322 }
3323
3324 if (ret)
3325 return ret;
Jan Schmidtb382a322013-05-28 15:47:24 +00003326 }
Jan Schmidt2f232032013-04-25 16:04:51 +00003327
3328 mutex_lock(&fs_info->qgroup_rescan_lock);
Jan Schmidtb382a322013-05-28 15:47:24 +00003329
3330 if (init_flags) {
Qu Wenruo9593bf492018-05-02 13:28:03 +08003331 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3332 btrfs_warn(fs_info,
3333 "qgroup rescan is already in progress");
Jan Schmidtb382a322013-05-28 15:47:24 +00003334 ret = -EINPROGRESS;
Qu Wenruo9593bf492018-05-02 13:28:03 +08003335 } else if (!(fs_info->qgroup_flags &
3336 BTRFS_QGROUP_STATUS_FLAG_ON)) {
3337 btrfs_warn(fs_info,
3338 "qgroup rescan init failed, qgroup is not enabled");
Jan Schmidtb382a322013-05-28 15:47:24 +00003339 ret = -EINVAL;
Qu Wenruo9593bf492018-05-02 13:28:03 +08003340 }
Jan Schmidtb382a322013-05-28 15:47:24 +00003341
3342 if (ret) {
Jan Schmidtb382a322013-05-28 15:47:24 +00003343 mutex_unlock(&fs_info->qgroup_rescan_lock);
Qu Wenruo9593bf492018-05-02 13:28:03 +08003344 return ret;
Jan Schmidtb382a322013-05-28 15:47:24 +00003345 }
Jan Schmidtb382a322013-05-28 15:47:24 +00003346 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3347 }
3348
3349 memset(&fs_info->qgroup_rescan_progress, 0,
3350 sizeof(fs_info->qgroup_rescan_progress));
3351 fs_info->qgroup_rescan_progress.objectid = progress_objectid;
Filipe Manana190631f2015-11-05 10:06:23 +00003352 init_completion(&fs_info->qgroup_rescan_completion);
Jan Schmidtb382a322013-05-28 15:47:24 +00003353 mutex_unlock(&fs_info->qgroup_rescan_lock);
3354
Qu Wenruofc97fab2014-02-28 10:46:16 +08003355 btrfs_init_work(&fs_info->qgroup_rescan_work,
3356 btrfs_qgroup_rescan_worker, NULL, NULL);
Jan Schmidtb382a322013-05-28 15:47:24 +00003357 return 0;
3358}
Jan Schmidt2f232032013-04-25 16:04:51 +00003359
Jan Schmidtb382a322013-05-28 15:47:24 +00003360static void
3361qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
3362{
3363 struct rb_node *n;
3364 struct btrfs_qgroup *qgroup;
3365
3366 spin_lock(&fs_info->qgroup_lock);
Jan Schmidt2f232032013-04-25 16:04:51 +00003367 /* clear all current qgroup tracking information */
3368 for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
3369 qgroup = rb_entry(n, struct btrfs_qgroup, node);
3370 qgroup->rfer = 0;
3371 qgroup->rfer_cmpr = 0;
3372 qgroup->excl = 0;
3373 qgroup->excl_cmpr = 0;
Qu Wenruo9c7b0c22018-08-10 10:20:26 +08003374 qgroup_dirty(fs_info, qgroup);
Jan Schmidt2f232032013-04-25 16:04:51 +00003375 }
3376 spin_unlock(&fs_info->qgroup_lock);
Jan Schmidtb382a322013-05-28 15:47:24 +00003377}
Jan Schmidt2f232032013-04-25 16:04:51 +00003378
Jan Schmidtb382a322013-05-28 15:47:24 +00003379int
3380btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
3381{
3382 int ret = 0;
3383 struct btrfs_trans_handle *trans;
3384
3385 ret = qgroup_rescan_init(fs_info, 0, 1);
3386 if (ret)
3387 return ret;
3388
3389 /*
3390 * We have set the rescan_progress to 0, which means no more
3391 * delayed refs will be accounted by btrfs_qgroup_account_ref.
3392 * However, a task may already be inside btrfs_qgroup_account_ref, right
3393 * after its call to btrfs_find_all_roots, in which case it would still
3394 * do the accounting.
3395 * To solve this, we're committing the transaction, which will
3396 * ensure we run all delayed refs and only after that, we are
3397 * going to clear all tracking information for a clean start.
3398 */
3399
3400 trans = btrfs_join_transaction(fs_info->fs_root);
3401 if (IS_ERR(trans)) {
3402 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3403 return PTR_ERR(trans);
3404 }
Jeff Mahoney3a45bb22016-09-09 21:39:03 -04003405 ret = btrfs_commit_transaction(trans);
Jan Schmidtb382a322013-05-28 15:47:24 +00003406 if (ret) {
3407 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
3408 return ret;
3409 }
3410
3411 qgroup_rescan_zero_tracking(fs_info);
3412
Qu Wenruod61acbb2020-02-07 13:38:20 +08003413 mutex_lock(&fs_info->qgroup_rescan_lock);
3414 fs_info->qgroup_rescan_running = true;
Qu Wenruofc97fab2014-02-28 10:46:16 +08003415 btrfs_queue_work(fs_info->qgroup_rescan_workers,
3416 &fs_info->qgroup_rescan_work);
Qu Wenruod61acbb2020-02-07 13:38:20 +08003417 mutex_unlock(&fs_info->qgroup_rescan_lock);
Jan Schmidt2f232032013-04-25 16:04:51 +00003418
3419 return 0;
3420}
Jan Schmidt57254b6e2013-05-06 19:14:17 +00003421
Jeff Mahoneyd06f23d2016-08-08 22:08:06 -04003422int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info,
3423 bool interruptible)
Jan Schmidt57254b6e2013-05-06 19:14:17 +00003424{
3425 int running;
3426 int ret = 0;
3427
3428 mutex_lock(&fs_info->qgroup_rescan_lock);
Jeff Mahoneyd2c609b2016-08-15 12:10:33 -04003429 running = fs_info->qgroup_rescan_running;
Jan Schmidt57254b6e2013-05-06 19:14:17 +00003430 mutex_unlock(&fs_info->qgroup_rescan_lock);
3431
Jeff Mahoneyd06f23d2016-08-08 22:08:06 -04003432 if (!running)
3433 return 0;
3434
3435 if (interruptible)
Jan Schmidt57254b6e2013-05-06 19:14:17 +00003436 ret = wait_for_completion_interruptible(
3437 &fs_info->qgroup_rescan_completion);
Jeff Mahoneyd06f23d2016-08-08 22:08:06 -04003438 else
3439 wait_for_completion(&fs_info->qgroup_rescan_completion);
Jan Schmidt57254b6e2013-05-06 19:14:17 +00003440
3441 return ret;
3442}
Jan Schmidtb382a322013-05-28 15:47:24 +00003443
3444/*
3445 * this is only called from open_ctree where we're still single threaded, thus
3446 * locking is omitted here.
3447 */
3448void
3449btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
3450{
Qu Wenruod61acbb2020-02-07 13:38:20 +08003451 if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
3452 mutex_lock(&fs_info->qgroup_rescan_lock);
3453 fs_info->qgroup_rescan_running = true;
Qu Wenruofc97fab2014-02-28 10:46:16 +08003454 btrfs_queue_work(fs_info->qgroup_rescan_workers,
3455 &fs_info->qgroup_rescan_work);
Qu Wenruod61acbb2020-02-07 13:38:20 +08003456 mutex_unlock(&fs_info->qgroup_rescan_lock);
3457 }
Jan Schmidtb382a322013-05-28 15:47:24 +00003458}
Qu Wenruo52472552015-10-12 16:05:40 +08003459
Qu Wenruo263da812020-07-08 14:24:45 +08003460#define rbtree_iterate_from_safe(node, next, start) \
3461 for (node = start; node && ({ next = rb_next(node); 1;}); node = next)
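
/*
 * Example (illustrative; @some_root is a hypothetical rb-tree root): the
 * macro samples @next before the body runs, so the body may safely erase
 * the current node:
 *
 *	rbtree_iterate_from_safe(node, next, first) {
 *		entry = rb_entry(node, struct ulist_node, rb_node);
 *		rb_erase(node, &some_root);
 *	}
 */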
3462
3463static int qgroup_unreserve_range(struct btrfs_inode *inode,
3464 struct extent_changeset *reserved, u64 start,
3465 u64 len)
3466{
3467 struct rb_node *node;
3468 struct rb_node *next;
Dan Carpenterf07728d2020-10-23 14:26:33 +03003469 struct ulist_node *entry;
Qu Wenruo263da812020-07-08 14:24:45 +08003470 int ret = 0;
3471
3472 node = reserved->range_changed.root.rb_node;
Dan Carpenterf07728d2020-10-23 14:26:33 +03003473 if (!node)
3474 return 0;
Qu Wenruo263da812020-07-08 14:24:45 +08003475 while (node) {
3476 entry = rb_entry(node, struct ulist_node, rb_node);
3477 if (entry->val < start)
3478 node = node->rb_right;
Qu Wenruo263da812020-07-08 14:24:45 +08003479 else
Dan Carpenterf07728d2020-10-23 14:26:33 +03003480 node = node->rb_left;
Qu Wenruo263da812020-07-08 14:24:45 +08003481 }
3482
Qu Wenruo263da812020-07-08 14:24:45 +08003483 if (entry->val > start && rb_prev(&entry->rb_node))
3484 entry = rb_entry(rb_prev(&entry->rb_node), struct ulist_node,
3485 rb_node);
3486
3487 rbtree_iterate_from_safe(node, next, &entry->rb_node) {
3488 u64 entry_start;
3489 u64 entry_end;
3490 u64 entry_len;
3491 int clear_ret;
3492
3493 entry = rb_entry(node, struct ulist_node, rb_node);
3494 entry_start = entry->val;
3495 entry_end = entry->aux;
3496 entry_len = entry_end - entry_start + 1;
3497
3498 if (entry_start >= start + len)
3499 break;
3500 if (entry_start + entry_len <= start)
3501 continue;
3502 /*
3503 * Now the entry overlaps [start, start + len); clear the
3504 * EXTENT_QGROUP_RESERVED bit.
3505 */
3506 clear_ret = clear_extent_bits(&inode->io_tree, entry_start,
3507 entry_end, EXTENT_QGROUP_RESERVED);
3508 if (!ret && clear_ret < 0)
3509 ret = clear_ret;
3510
3511 ulist_del(&reserved->range_changed, entry->val, entry->aux);
3512 if (likely(reserved->bytes_changed >= entry_len)) {
3513 reserved->bytes_changed -= entry_len;
3514 } else {
3515 WARN_ON(1);
3516 reserved->bytes_changed = 0;
3517 }
3518 }
3519
3520 return ret;
3521}
3522
Qu Wenruo52472552015-10-12 16:05:40 +08003523/*
Qu Wenruoc53e9652020-07-13 18:50:48 +08003524 * Try to free some space for qgroup.
Qu Wenruo52472552015-10-12 16:05:40 +08003525 *
Qu Wenruoc53e9652020-07-13 18:50:48 +08003526 * For qgroup, there are only 3 ways to free qgroup space:
3527 * - Flush nodatacow write
3528 * Any nodatacow write will free its reserved data space at run_delalloc_range().
3529 * In theory, we should only flush nodatacow inodes, but it's not yet
3530 * possible, so we need to flush the whole root.
Qu Wenruo52472552015-10-12 16:05:40 +08003531 *
Qu Wenruoc53e9652020-07-13 18:50:48 +08003532 * - Wait for ordered extents
3533 * When ordered extents are finished, their reserved metadata is finally
3534 * converted to per_trans status, which can be freed by a later
3535 * transaction commit.
Qu Wenruo52472552015-10-12 16:05:40 +08003536 *
Qu Wenruoc53e9652020-07-13 18:50:48 +08003537 * - Commit transaction
3538 * This would free the meta_per_trans space.
3539 * In theory this shouldn't provide much space, but when qgroup space
3540 * is tight every freed byte helps.
Qu Wenruo52472552015-10-12 16:05:40 +08003541 */
Qu Wenruoc53e9652020-07-13 18:50:48 +08003542static int try_flush_qgroup(struct btrfs_root *root)
3543{
3544 struct btrfs_trans_handle *trans;
3545 int ret;
3546
Nikolay Borisovae396a32021-02-22 18:40:45 +02003547 /* Can't hold an open transaction or we run the risk of deadlocking */
3548 ASSERT(current->journal_info == NULL ||
3549 current->journal_info == BTRFS_SEND_TRANS_STUB);
3550 if (WARN_ON(current->journal_info &&
3551 current->journal_info != BTRFS_SEND_TRANS_STUB))
3552 return 0;
Qu Wenruo6f232772020-11-11 19:38:18 +08003553
Qu Wenruoae5e0702020-12-04 09:24:47 +08003554 /*
3555 * We don't want to run flush again and again, so if there is a running
3556 * one, we won't try to start a new flush, but exit directly.
3557 */
3558 if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
Qu Wenruoae5e0702020-12-04 09:24:47 +08003559 wait_event(root->qgroup_flush_wait,
3560 !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
3561 return 0;
3562 }
3563
Qu Wenruoc53e9652020-07-13 18:50:48 +08003564 ret = btrfs_start_delalloc_snapshot(root);
3565 if (ret < 0)
3566 goto out;
3567 btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
3568
3569 trans = btrfs_join_transaction(root);
3570 if (IS_ERR(trans)) {
3571 ret = PTR_ERR(trans);
3572 goto out;
3573 }
3574
Nikolay Borisovae396a32021-02-22 18:40:45 +02003575 ret = btrfs_commit_transaction(trans);
Qu Wenruoc53e9652020-07-13 18:50:48 +08003576out:
3577 clear_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state);
3578 wake_up(&root->qgroup_flush_wait);
3579 return ret;
3580}
3581
3582static int qgroup_reserve_data(struct btrfs_inode *inode,
Qu Wenruo364ecf32017-02-27 15:10:38 +08003583 struct extent_changeset **reserved_ret, u64 start,
3584 u64 len)
Qu Wenruo52472552015-10-12 16:05:40 +08003585{
Nikolay Borisov7661a3e2020-06-03 08:55:37 +03003586 struct btrfs_root *root = inode->root;
Qu Wenruo364ecf32017-02-27 15:10:38 +08003587 struct extent_changeset *reserved;
Qu Wenruo263da812020-07-08 14:24:45 +08003588 bool new_reserved = false;
Qu Wenruo364ecf32017-02-27 15:10:38 +08003589 u64 orig_reserved;
3590 u64 to_reserve;
Qu Wenruo52472552015-10-12 16:05:40 +08003591 int ret;
3592
Josef Bacikafcdd122016-09-02 15:40:02 -04003593 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09003594 !is_fstree(root->root_key.objectid) || len == 0)
Qu Wenruo52472552015-10-12 16:05:40 +08003595 return 0;
3596
Qu Wenruo364ecf32017-02-27 15:10:38 +08003597 /* @reserved parameter is mandatory for qgroup */
3598 if (WARN_ON(!reserved_ret))
3599 return -EINVAL;
3600 if (!*reserved_ret) {
Qu Wenruo263da812020-07-08 14:24:45 +08003601 new_reserved = true;
Qu Wenruo364ecf32017-02-27 15:10:38 +08003602 *reserved_ret = extent_changeset_alloc();
3603 if (!*reserved_ret)
3604 return -ENOMEM;
3605 }
3606 reserved = *reserved_ret;
3607 /* Record already reserved space */
3608 orig_reserved = reserved->bytes_changed;
Nikolay Borisov7661a3e2020-06-03 08:55:37 +03003609 ret = set_record_extent_bits(&inode->io_tree, start,
Qu Wenruo364ecf32017-02-27 15:10:38 +08003610 start + len - 1, EXTENT_QGROUP_RESERVED, reserved);
3611
3612 /* Newly reserved space */
3613 to_reserve = reserved->bytes_changed - orig_reserved;
Nikolay Borisov7661a3e2020-06-03 08:55:37 +03003614 trace_btrfs_qgroup_reserve_data(&inode->vfs_inode, start, len,
Qu Wenruo364ecf32017-02-27 15:10:38 +08003615 to_reserve, QGROUP_RESERVE);
Qu Wenruo52472552015-10-12 16:05:40 +08003616 if (ret < 0)
Qu Wenruo263da812020-07-08 14:24:45 +08003617 goto out;
Qu Wenruodba21322017-12-12 15:34:25 +08003618 ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA);
Qu Wenruo52472552015-10-12 16:05:40 +08003619 if (ret < 0)
3620 goto cleanup;
3621
Qu Wenruo52472552015-10-12 16:05:40 +08003622 return ret;
3623
3624cleanup:
Qu Wenruo263da812020-07-08 14:24:45 +08003625 qgroup_unreserve_range(inode, reserved, start, len);
3626out:
3627 if (new_reserved) {
Nikolay Borisovd6ade682021-03-02 12:44:40 +02003628 extent_changeset_free(reserved);
Qu Wenruo263da812020-07-08 14:24:45 +08003629 *reserved_ret = NULL;
3630 }
Qu Wenruo52472552015-10-12 16:05:40 +08003631 return ret;
3632}
Qu Wenruof695fdc2015-10-12 16:28:06 +08003633
Qu Wenruoc53e9652020-07-13 18:50:48 +08003634/*
3635 * Reserve qgroup space for range [start, start + len).
3636 *
3637 * This function will either reserve space from related qgroups or do nothing
3638 * if the range is already reserved.
3639 *
3640 * Return 0 for successful reservation
3641 * Return <0 for error (including -EDQUOT)
3642 *
3643 * NOTE: This function may sleep for memory allocation, dirty page flushing and
3644 * commit transaction. So caller should not hold any dirty page locked.
3645 */
3646int btrfs_qgroup_reserve_data(struct btrfs_inode *inode,
3647 struct extent_changeset **reserved_ret, u64 start,
3648 u64 len)
3649{
3650 int ret;
3651
3652 ret = qgroup_reserve_data(inode, reserved_ret, start, len);
3653 if (ret <= 0 && ret != -EDQUOT)
3654 return ret;
3655
3656 ret = try_flush_qgroup(inode->root);
3657 if (ret < 0)
3658 return ret;
3659 return qgroup_reserve_data(inode, reserved_ret, start, len);
3660}
3661
Qu Wenruobc42bda2017-02-27 15:10:39 +08003662/* Free ranges specified by @reserved, normally in error path */
Nikolay Borisovdf2cfd12020-06-03 08:55:09 +03003663static int qgroup_free_reserved_data(struct btrfs_inode *inode,
Qu Wenruobc42bda2017-02-27 15:10:39 +08003664 struct extent_changeset *reserved, u64 start, u64 len)
3665{
Nikolay Borisovdf2cfd12020-06-03 08:55:09 +03003666 struct btrfs_root *root = inode->root;
Qu Wenruobc42bda2017-02-27 15:10:39 +08003667 struct ulist_node *unode;
3668 struct ulist_iterator uiter;
3669 struct extent_changeset changeset;
3670 int freed = 0;
3671 int ret;
3672
3673 extent_changeset_init(&changeset);
3674 len = round_up(start + len, root->fs_info->sectorsize);
3675 start = round_down(start, root->fs_info->sectorsize);
3676
3677 ULIST_ITER_INIT(&uiter);
3678 while ((unode = ulist_next(&reserved->range_changed, &uiter))) {
3679 u64 range_start = unode->val;
3680 /* unode->aux is the inclusive end */
3681 u64 range_len = unode->aux - range_start + 1;
3682 u64 free_start;
3683 u64 free_len;
3684
3685 extent_changeset_release(&changeset);
3686
3687 /* Only free range in range [start, start + len) */
3688 if (range_start >= start + len ||
3689 range_start + range_len <= start)
3690 continue;
3691 free_start = max(range_start, start);
3692 free_len = min(start + len, range_start + range_len) -
3693 free_start;
3694 /*
3695		 * TODO: Also modify reserved->ranges_reserved to reflect
3696		 * the modification.
3697		 *
3698		 * However, as long as we free qgroup reserved space according to
3699		 * EXTENT_QGROUP_RESERVED, we won't double free.
3700		 * So there is no need to rush.
3701 */
Nikolay Borisovdf2cfd12020-06-03 08:55:09 +03003702 ret = clear_record_extent_bits(&inode->io_tree, free_start,
3703 free_start + free_len - 1,
Qu Wenruobc42bda2017-02-27 15:10:39 +08003704 EXTENT_QGROUP_RESERVED, &changeset);
3705 if (ret < 0)
3706 goto out;
3707 freed += changeset.bytes_changed;
3708 }
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09003709 btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid, freed,
Qu Wenruod4e5c922017-12-12 15:34:23 +08003710 BTRFS_QGROUP_RSV_DATA);
Qu Wenruobc42bda2017-02-27 15:10:39 +08003711 ret = freed;
3712out:
3713 extent_changeset_release(&changeset);
3714 return ret;
3715}
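
/*
 * Worked example (made-up ranges): with a reserved entry covering
 * [0, 64K) and a free request for [48K, 80K), free_start = 48K and
 * free_len = 16K, so only the overlap [48K, 64K) has its
 * EXTENT_QGROUP_RESERVED bit cleared and counted towards @freed.
 */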
3716
Nikolay Borisov8769af92020-06-03 08:55:10 +03003717static int __btrfs_qgroup_release_data(struct btrfs_inode *inode,
Qu Wenruobc42bda2017-02-27 15:10:39 +08003718 struct extent_changeset *reserved, u64 start, u64 len,
3719 int free)
Qu Wenruof695fdc2015-10-12 16:28:06 +08003720{
3721 struct extent_changeset changeset;
Qu Wenruo81fb6f72015-09-28 16:57:53 +08003722 int trace_op = QGROUP_RELEASE;
Qu Wenruof695fdc2015-10-12 16:28:06 +08003723 int ret;
3724
Nikolay Borisov8769af92020-06-03 08:55:10 +03003725 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &inode->root->fs_info->flags))
Qu Wenruo3628b4c2018-10-09 14:36:45 +08003726 return 0;
3727
Qu Wenruobc42bda2017-02-27 15:10:39 +08003728 /* In release case, we shouldn't have @reserved */
3729 WARN_ON(!free && reserved);
3730 if (free && reserved)
Nikolay Borisov8769af92020-06-03 08:55:10 +03003731 return qgroup_free_reserved_data(inode, reserved, start, len);
Qu Wenruo364ecf32017-02-27 15:10:38 +08003732 extent_changeset_init(&changeset);
Nikolay Borisov8769af92020-06-03 08:55:10 +03003733 ret = clear_record_extent_bits(&inode->io_tree, start, start + len - 1,
3734 EXTENT_QGROUP_RESERVED, &changeset);
Qu Wenruof695fdc2015-10-12 16:28:06 +08003735 if (ret < 0)
3736 goto out;
3737
Qu Wenruod51ea5d2017-03-13 15:52:09 +08003738 if (free)
3739 trace_op = QGROUP_FREE;
Nikolay Borisov8769af92020-06-03 08:55:10 +03003740 trace_btrfs_qgroup_release_data(&inode->vfs_inode, start, len,
Qu Wenruod51ea5d2017-03-13 15:52:09 +08003741 changeset.bytes_changed, trace_op);
3742 if (free)
Nikolay Borisov8769af92020-06-03 08:55:10 +03003743 btrfs_qgroup_free_refroot(inode->root->fs_info,
3744 inode->root->root_key.objectid,
Qu Wenruod4e5c922017-12-12 15:34:23 +08003745 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
Qu Wenruo7bc329c2017-02-27 15:10:36 +08003746 ret = changeset.bytes_changed;
Qu Wenruof695fdc2015-10-12 16:28:06 +08003747out:
Qu Wenruo364ecf32017-02-27 15:10:38 +08003748 extent_changeset_release(&changeset);
Qu Wenruof695fdc2015-10-12 16:28:06 +08003749 return ret;
3750}
3751
3752/*
3753 * Free a reserved space range from io_tree and related qgroups
3754 *
3755 * Should be called when a range of pages gets invalidated before reaching
3756 * disk, or for the error cleanup case.
Qu Wenruobc42bda2017-02-27 15:10:39 +08003757 * if @reserved is given, only reserved range in [@start, @start + @len) will
3758 * be freed.
Qu Wenruof695fdc2015-10-12 16:28:06 +08003759 *
3760 * For data written to disk, use btrfs_qgroup_release_data().
3761 *
3762 * NOTE: This function may sleep for memory allocation.
3763 */
Nikolay Borisov8b8a9792020-06-03 08:55:11 +03003764int btrfs_qgroup_free_data(struct btrfs_inode *inode,
Qu Wenruobc42bda2017-02-27 15:10:39 +08003765 struct extent_changeset *reserved, u64 start, u64 len)
Qu Wenruof695fdc2015-10-12 16:28:06 +08003766{
Nikolay Borisov8b8a9792020-06-03 08:55:11 +03003767 return __btrfs_qgroup_release_data(inode, reserved, start, len, 1);
Qu Wenruof695fdc2015-10-12 16:28:06 +08003768}
3769
3770/*
3771 * Release a reserved space range from io_tree only.
3772 *
3773 * Should be called when a range of pages gets written to disk and the
3774 * corresponding FILE_EXTENT item is inserted into the root.
3775 *
3776 * Since new qgroup accounting framework will only update qgroup numbers at
3777 * commit_transaction() time, its reserved space shouldn't be freed from
3778 * related qgroups.
3779 *
3780 * But we should release the range from io_tree, to allow further write to be
3781 * COWed.
3782 *
3783 * NOTE: This function may sleep for memory allocation.
3784 */
Nikolay Borisov72b7d152020-06-03 08:55:18 +03003785int btrfs_qgroup_release_data(struct btrfs_inode *inode, u64 start, u64 len)
Qu Wenruof695fdc2015-10-12 16:28:06 +08003786{
Nikolay Borisov72b7d152020-06-03 08:55:18 +03003787 return __btrfs_qgroup_release_data(inode, NULL, start, len, 0);
Qu Wenruof695fdc2015-10-12 16:28:06 +08003788}
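
/*
 * Usage sketch (illustrative): the two helpers above are the two halves
 * of the data reservation life cycle. An error path gives the space back
 * to the qgroups, while a successful write only releases the io_tree
 * range:
 *
 *	if (write_failed)
 *		btrfs_qgroup_free_data(inode, reserved, start, len);
 *	else
 *		btrfs_qgroup_release_data(inode, start, len);
 */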
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003789
Qu Wenruo82874752017-12-12 15:34:34 +08003790static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3791 enum btrfs_qgroup_rsv_type type)
3792{
3793 if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3794 type != BTRFS_QGROUP_RSV_META_PERTRANS)
3795 return;
3796 if (num_bytes == 0)
3797 return;
3798
3799 spin_lock(&root->qgroup_meta_rsv_lock);
3800 if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
3801 root->qgroup_meta_rsv_prealloc += num_bytes;
3802 else
3803 root->qgroup_meta_rsv_pertrans += num_bytes;
3804 spin_unlock(&root->qgroup_meta_rsv_lock);
3805}
3806
3807static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes,
3808 enum btrfs_qgroup_rsv_type type)
3809{
3810 if (type != BTRFS_QGROUP_RSV_META_PREALLOC &&
3811 type != BTRFS_QGROUP_RSV_META_PERTRANS)
3812 return 0;
3813 if (num_bytes == 0)
3814 return 0;
3815
3816 spin_lock(&root->qgroup_meta_rsv_lock);
3817 if (type == BTRFS_QGROUP_RSV_META_PREALLOC) {
3818 num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc,
3819 num_bytes);
3820 root->qgroup_meta_rsv_prealloc -= num_bytes;
3821 } else {
3822 num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans,
3823 num_bytes);
3824 root->qgroup_meta_rsv_pertrans -= num_bytes;
3825 }
3826 spin_unlock(&root->qgroup_meta_rsv_lock);
3827 return num_bytes;
3828}
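
/*
 * Illustrative sequence (simplified): the per-root counters clamp frees
 * to what was actually recorded, so a quota disabled->enabled transition
 * cannot underflow the qgroup numbers:
 *
 *	add_root_meta_rsv(root, SZ_16K, BTRFS_QGROUP_RSV_META_PREALLOC);
 *	sub_root_meta_rsv(root, SZ_32K, BTRFS_QGROUP_RSV_META_PREALLOC);
 *
 * The second call returns 16K, not 32K, and only that much is freed.
 */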
3829
Nikolay Borisov80e9bae2021-02-22 18:40:43 +02003830int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3831 enum btrfs_qgroup_rsv_type type, bool enforce)
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003832{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003833 struct btrfs_fs_info *fs_info = root->fs_info;
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003834 int ret;
3835
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003836 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09003837 !is_fstree(root->root_key.objectid) || num_bytes == 0)
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003838 return 0;
3839
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003840 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
Qu Wenruofd2b0072019-10-17 10:38:36 +08003841 trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
Qu Wenruo733e03a2017-12-12 15:34:29 +08003842 ret = qgroup_reserve(root, num_bytes, enforce, type);
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003843 if (ret < 0)
3844 return ret;
Qu Wenruo82874752017-12-12 15:34:34 +08003845 /*
3846	 * Record what we have reserved into the root.
3847	 *
3848	 * This avoids an underflow across a quota disabled->enabled transition:
3849	 * in that case we may try to free space we haven't reserved
3850	 * (since quota was disabled), so record what we reserved into the root
3851	 * and ensure a later release won't underflow this number.
3852 */
3853 add_root_meta_rsv(root, num_bytes, type);
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003854 return ret;
3855}
3856
Qu Wenruoc53e9652020-07-13 18:50:48 +08003857int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
3858 enum btrfs_qgroup_rsv_type type, bool enforce)
3859{
3860 int ret;
3861
Nikolay Borisov80e9bae2021-02-22 18:40:43 +02003862 ret = btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
Qu Wenruoc53e9652020-07-13 18:50:48 +08003863 if (ret <= 0 && ret != -EDQUOT)
3864 return ret;
3865
3866 ret = try_flush_qgroup(root);
3867 if (ret < 0)
3868 return ret;
Nikolay Borisov80e9bae2021-02-22 18:40:43 +02003869 return btrfs_qgroup_reserve_meta(root, num_bytes, type, enforce);
Qu Wenruoc53e9652020-07-13 18:50:48 +08003870}
3871
Qu Wenruo733e03a2017-12-12 15:34:29 +08003872void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root)
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003873{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003874 struct btrfs_fs_info *fs_info = root->fs_info;
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003875
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003876 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09003877 !is_fstree(root->root_key.objectid))
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003878 return;
3879
Qu Wenruoe1211d02017-12-12 15:34:30 +08003880 /* TODO: Update trace point to handle such free */
Qu Wenruo4ee0d882017-12-12 15:34:35 +08003881 trace_qgroup_meta_free_all_pertrans(root);
Qu Wenruoe1211d02017-12-12 15:34:30 +08003882 /* Special value -1 means to free all reserved space */
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09003883 btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid, (u64)-1,
Qu Wenruo733e03a2017-12-12 15:34:29 +08003884 BTRFS_QGROUP_RSV_META_PERTRANS);
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003885}
3886
Qu Wenruo733e03a2017-12-12 15:34:29 +08003887void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
3888 enum btrfs_qgroup_rsv_type type)
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003889{
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003890 struct btrfs_fs_info *fs_info = root->fs_info;
3891
3892 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09003893 !is_fstree(root->root_key.objectid))
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003894 return;
3895
Qu Wenruo82874752017-12-12 15:34:34 +08003896 /*
3897 * reservation for META_PREALLOC can happen before quota is enabled,
3898 * which can lead to underflow.
3899	 * Here we ensure we will only free what we really have reserved.
3900 */
3901 num_bytes = sub_root_meta_rsv(root, num_bytes, type);
Jeff Mahoney0b246af2016-06-22 18:54:23 -04003902 BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
Qu Wenruofd2b0072019-10-17 10:38:36 +08003903 trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09003904 btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
3905 num_bytes, type);
Qu Wenruo55eeaf02015-09-08 17:08:38 +08003906}
Qu Wenruo56fa9d02015-10-13 09:53:10 +08003907
Qu Wenruo64cfaef2017-12-12 15:34:31 +08003908static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root,
3909 int num_bytes)
3910{
Qu Wenruo64cfaef2017-12-12 15:34:31 +08003911 struct btrfs_qgroup *qgroup;
3912 struct ulist_node *unode;
3913 struct ulist_iterator uiter;
3914 int ret = 0;
3915
3916 if (num_bytes == 0)
3917 return;
Marcos Paulo de Souzae3b0edd2019-11-25 21:58:50 -03003918 if (!fs_info->quota_root)
Qu Wenruo64cfaef2017-12-12 15:34:31 +08003919 return;
3920
3921 spin_lock(&fs_info->qgroup_lock);
3922 qgroup = find_qgroup_rb(fs_info, ref_root);
3923 if (!qgroup)
3924 goto out;
3925 ulist_reinit(fs_info->qgroup_ulist);
3926 ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
David Sterbaa1840b52018-03-27 19:04:50 +02003927 qgroup_to_aux(qgroup), GFP_ATOMIC);
Qu Wenruo64cfaef2017-12-12 15:34:31 +08003928 if (ret < 0)
3929 goto out;
3930 ULIST_ITER_INIT(&uiter);
3931 while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
3932 struct btrfs_qgroup *qg;
3933 struct btrfs_qgroup_list *glist;
3934
3935 qg = unode_aux_to_qgroup(unode);
3936
3937 qgroup_rsv_release(fs_info, qg, num_bytes,
3938 BTRFS_QGROUP_RSV_META_PREALLOC);
3939 qgroup_rsv_add(fs_info, qg, num_bytes,
3940 BTRFS_QGROUP_RSV_META_PERTRANS);
3941 list_for_each_entry(glist, &qg->groups, next_group) {
3942 ret = ulist_add(fs_info->qgroup_ulist,
3943 glist->group->qgroupid,
David Sterbaa1840b52018-03-27 19:04:50 +02003944 qgroup_to_aux(glist->group), GFP_ATOMIC);
Qu Wenruo64cfaef2017-12-12 15:34:31 +08003945 if (ret < 0)
3946 goto out;
3947 }
3948 }
3949out:
3950 spin_unlock(&fs_info->qgroup_lock);
3951}
3952
3953void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
3954{
3955 struct btrfs_fs_info *fs_info = root->fs_info;
3956
3957 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09003958 !is_fstree(root->root_key.objectid))
Qu Wenruo64cfaef2017-12-12 15:34:31 +08003959 return;
Qu Wenruo82874752017-12-12 15:34:34 +08003960 /* Same as btrfs_qgroup_free_meta_prealloc() */
3961 num_bytes = sub_root_meta_rsv(root, num_bytes,
3962 BTRFS_QGROUP_RSV_META_PREALLOC);
Qu Wenruo4ee0d882017-12-12 15:34:35 +08003963 trace_qgroup_meta_convert(root, num_bytes);
Misono Tomohiro4fd786e2018-08-06 14:25:24 +09003964 qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
Qu Wenruo64cfaef2017-12-12 15:34:31 +08003965}
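
/*
 * Sketch (illustrative): converting 16K after a metadata reservation
 * became pinned to the current transaction moves the bytes between rsv
 * types without changing the total:
 *
 *	btrfs_qgroup_convert_reserved_meta(root, SZ_16K);
 *
 * Afterwards the hierarchy holds 16K less META_PREALLOC and 16K more
 * META_PERTRANS reservation.
 */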
3966
Qu Wenruo56fa9d02015-10-13 09:53:10 +08003967/*
Nicholas D Steeves01327612016-05-19 21:18:45 -04003968 * Check for leaked qgroup reserved space, normally at inode destruction
Qu Wenruo56fa9d02015-10-13 09:53:10 +08003969 * time
3970 */
Nikolay Borisovcfdd4592020-06-03 08:55:46 +03003971void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
Qu Wenruo56fa9d02015-10-13 09:53:10 +08003972{
3973 struct extent_changeset changeset;
3974 struct ulist_node *unode;
3975 struct ulist_iterator iter;
3976 int ret;
3977
Qu Wenruo364ecf32017-02-27 15:10:38 +08003978 extent_changeset_init(&changeset);
Nikolay Borisovcfdd4592020-06-03 08:55:46 +03003979 ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
David Sterbaf734c442016-04-26 23:54:39 +02003980 EXTENT_QGROUP_RESERVED, &changeset);
Qu Wenruo56fa9d02015-10-13 09:53:10 +08003981
3982 WARN_ON(ret < 0);
3983 if (WARN_ON(changeset.bytes_changed)) {
3984 ULIST_ITER_INIT(&iter);
David Sterba53d32352017-02-13 13:42:29 +01003985 while ((unode = ulist_next(&changeset.range_changed, &iter))) {
Nikolay Borisovcfdd4592020-06-03 08:55:46 +03003986 btrfs_warn(inode->root->fs_info,
3987 "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
3988 btrfs_ino(inode), unode->val, unode->aux);
Qu Wenruo56fa9d02015-10-13 09:53:10 +08003989 }
Nikolay Borisovcfdd4592020-06-03 08:55:46 +03003990 btrfs_qgroup_free_refroot(inode->root->fs_info,
3991 inode->root->root_key.objectid,
Qu Wenruod4e5c922017-12-12 15:34:23 +08003992 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
David Sterba0b08e1f2017-02-13 14:24:35 +01003993
Qu Wenruo56fa9d02015-10-13 09:53:10 +08003994 }
Qu Wenruo364ecf32017-02-27 15:10:38 +08003995 extent_changeset_release(&changeset);
Qu Wenruo56fa9d02015-10-13 09:53:10 +08003996}
Qu Wenruo370a11b2019-01-23 15:15:16 +08003997
3998void btrfs_qgroup_init_swapped_blocks(
3999 struct btrfs_qgroup_swapped_blocks *swapped_blocks)
4000{
4001 int i;
4002
4003 spin_lock_init(&swapped_blocks->lock);
4004 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
4005 swapped_blocks->blocks[i] = RB_ROOT;
4006 swapped_blocks->swapped = false;
4007}
4008
4009/*
4010 * Delete all swapped block records of @root.
4011 * Every record here means we skipped a full subtree scan for qgroup.
4012 *
4013 * Gets called when committing one transaction.
4014 */
4015void btrfs_qgroup_clean_swapped_blocks(struct btrfs_root *root)
4016{
4017 struct btrfs_qgroup_swapped_blocks *swapped_blocks;
4018 int i;
4019
4020 swapped_blocks = &root->swapped_blocks;
4021
4022 spin_lock(&swapped_blocks->lock);
4023 if (!swapped_blocks->swapped)
4024 goto out;
4025 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4026 struct rb_root *cur_root = &swapped_blocks->blocks[i];
4027 struct btrfs_qgroup_swapped_block *entry;
4028 struct btrfs_qgroup_swapped_block *next;
4029
4030 rbtree_postorder_for_each_entry_safe(entry, next, cur_root,
4031 node)
4032 kfree(entry);
4033 swapped_blocks->blocks[i] = RB_ROOT;
4034 }
4035 swapped_blocks->swapped = false;
4036out:
4037 spin_unlock(&swapped_blocks->lock);
4038}
4039
4040/*
4041 * Add a subtree root record into @subvol_root.
4042 *
4043 * @subvol_root: tree root of the subvolume tree that got swapped
4044 * @bg: block group under balance
4045 * @subvol_parent/slot: pointer to the subtree root in subvolume tree
4046 * @reloc_parent/slot: pointer to the subtree root in reloc tree
4047 * BOTH POINTERS ARE BEFORE TREE SWAP
4048 * @last_snapshot: last snapshot generation of the subvolume tree
4049 */
4050int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
4051 struct btrfs_root *subvol_root,
David Sterba32da53862019-10-29 19:20:18 +01004052 struct btrfs_block_group *bg,
Qu Wenruo370a11b2019-01-23 15:15:16 +08004053 struct extent_buffer *subvol_parent, int subvol_slot,
4054 struct extent_buffer *reloc_parent, int reloc_slot,
4055 u64 last_snapshot)
4056{
4057 struct btrfs_fs_info *fs_info = subvol_root->fs_info;
4058 struct btrfs_qgroup_swapped_blocks *blocks = &subvol_root->swapped_blocks;
4059 struct btrfs_qgroup_swapped_block *block;
4060 struct rb_node **cur;
4061 struct rb_node *parent = NULL;
4062 int level = btrfs_header_level(subvol_parent) - 1;
4063 int ret = 0;
4064
4065 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
4066 return 0;
4067
4068 if (btrfs_node_ptr_generation(subvol_parent, subvol_slot) >
4069 btrfs_node_ptr_generation(reloc_parent, reloc_slot)) {
4070 btrfs_err_rl(fs_info,
4071 "%s: bad parameter order, subvol_gen=%llu reloc_gen=%llu",
4072 __func__,
4073 btrfs_node_ptr_generation(subvol_parent, subvol_slot),
4074 btrfs_node_ptr_generation(reloc_parent, reloc_slot));
4075 return -EUCLEAN;
4076 }
4077
4078 block = kmalloc(sizeof(*block), GFP_NOFS);
4079 if (!block) {
4080 ret = -ENOMEM;
4081 goto out;
4082 }
4083
4084 /*
4085 * @reloc_parent/slot is still before swap, while @block is going to
4086 * record the bytenr after swap, so we do the swap here.
4087 */
4088 block->subvol_bytenr = btrfs_node_blockptr(reloc_parent, reloc_slot);
4089 block->subvol_generation = btrfs_node_ptr_generation(reloc_parent,
4090 reloc_slot);
4091 block->reloc_bytenr = btrfs_node_blockptr(subvol_parent, subvol_slot);
4092 block->reloc_generation = btrfs_node_ptr_generation(subvol_parent,
4093 subvol_slot);
4094 block->last_snapshot = last_snapshot;
4095 block->level = level;
Qu Wenruo57949d02019-05-21 19:28:08 +08004096
4097 /*
4098 * If we have bg == NULL, we're called from btrfs_recover_relocation(),
4099	 * no one else can modify tree blocks, thus qgroup numbers will not
4100	 * change no matter the value of trace_leaf.
4101 */
4102 if (bg && bg->flags & BTRFS_BLOCK_GROUP_DATA)
Qu Wenruo370a11b2019-01-23 15:15:16 +08004103 block->trace_leaf = true;
4104 else
4105 block->trace_leaf = false;
4106 btrfs_node_key_to_cpu(reloc_parent, &block->first_key, reloc_slot);
4107
4108 /* Insert @block into @blocks */
4109 spin_lock(&blocks->lock);
4110 cur = &blocks->blocks[level].rb_node;
4111 while (*cur) {
4112 struct btrfs_qgroup_swapped_block *entry;
4113
4114 parent = *cur;
4115 entry = rb_entry(parent, struct btrfs_qgroup_swapped_block,
4116 node);
4117
4118 if (entry->subvol_bytenr < block->subvol_bytenr) {
4119 cur = &(*cur)->rb_left;
4120 } else if (entry->subvol_bytenr > block->subvol_bytenr) {
4121 cur = &(*cur)->rb_right;
4122 } else {
4123 if (entry->subvol_generation !=
4124 block->subvol_generation ||
4125 entry->reloc_bytenr != block->reloc_bytenr ||
4126 entry->reloc_generation !=
4127 block->reloc_generation) {
4128 /*
4129 * Duplicated but mismatched entry found.
4130 * Shouldn't happen.
4131 *
4132 * Marking qgroup inconsistent should be enough
4133 * for end users.
4134 */
4135 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4136 ret = -EEXIST;
4137 }
4138 kfree(block);
4139 goto out_unlock;
4140 }
4141 }
4142 rb_link_node(&block->node, parent, cur);
4143 rb_insert_color(&block->node, &blocks->blocks[level]);
4144 blocks->swapped = true;
4145out_unlock:
4146 spin_unlock(&blocks->lock);
4147out:
4148 if (ret < 0)
4149 fs_info->qgroup_flags |=
4150 BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
4151 return ret;
4152}
Qu Wenruof616f5c2019-01-23 15:15:17 +08004153
4154/*
4155 * Check if the tree block is a subtree root, and if so do the needed
4156 * delayed subtree trace for qgroup.
4157 *
4158 * This is called during btrfs_cow_block().
4159 */
4160int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
4161 struct btrfs_root *root,
4162 struct extent_buffer *subvol_eb)
4163{
4164 struct btrfs_fs_info *fs_info = root->fs_info;
4165 struct btrfs_qgroup_swapped_blocks *blocks = &root->swapped_blocks;
4166 struct btrfs_qgroup_swapped_block *block;
4167 struct extent_buffer *reloc_eb = NULL;
4168 struct rb_node *node;
4169 bool found = false;
4170 bool swapped = false;
4171 int level = btrfs_header_level(subvol_eb);
4172 int ret = 0;
4173 int i;
4174
4175 if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
4176 return 0;
4177 if (!is_fstree(root->root_key.objectid) || !root->reloc_root)
4178 return 0;
4179
4180 spin_lock(&blocks->lock);
4181 if (!blocks->swapped) {
4182 spin_unlock(&blocks->lock);
4183 return 0;
4184 }
4185 node = blocks->blocks[level].rb_node;
4186
4187 while (node) {
4188 block = rb_entry(node, struct btrfs_qgroup_swapped_block, node);
4189 if (block->subvol_bytenr < subvol_eb->start) {
4190 node = node->rb_left;
4191 } else if (block->subvol_bytenr > subvol_eb->start) {
4192 node = node->rb_right;
4193 } else {
4194 found = true;
4195 break;
4196 }
4197 }
4198 if (!found) {
4199 spin_unlock(&blocks->lock);
4200 goto out;
4201 }
4202 /* Found one, remove it from @blocks first and update blocks->swapped */
4203 rb_erase(&block->node, &blocks->blocks[level]);
4204 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4205 if (RB_EMPTY_ROOT(&blocks->blocks[i])) {
4206 swapped = true;
4207 break;
4208 }
4209 }
4210 blocks->swapped = swapped;
4211 spin_unlock(&blocks->lock);
4212
4213 /* Read out reloc subtree root */
Josef Bacik1b7ec852020-11-05 10:45:18 -05004214 reloc_eb = read_tree_block(fs_info, block->reloc_bytenr, 0,
Qu Wenruof616f5c2019-01-23 15:15:17 +08004215 block->reloc_generation, block->level,
4216 &block->first_key);
4217 if (IS_ERR(reloc_eb)) {
4218 ret = PTR_ERR(reloc_eb);
4219 reloc_eb = NULL;
4220 goto free_out;
4221 }
4222 if (!extent_buffer_uptodate(reloc_eb)) {
4223 ret = -EIO;
4224 goto free_out;
4225 }
4226
4227 ret = qgroup_trace_subtree_swap(trans, reloc_eb, subvol_eb,
4228 block->last_snapshot, block->trace_leaf);
4229free_out:
4230 kfree(block);
4231 free_extent_buffer(reloc_eb);
4232out:
4233 if (ret < 0) {
4234 btrfs_err_rl(fs_info,
4235 "failed to account subtree at bytenr %llu: %d",
4236 subvol_eb->start, ret);
4237 fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
4238 }
4239 return ret;
4240}
Jeff Mahoney81f7eb02020-02-11 15:25:37 +08004241
4242void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
4243{
4244 struct btrfs_qgroup_extent_record *entry;
4245 struct btrfs_qgroup_extent_record *next;
4246 struct rb_root *root;
4247
4248 root = &trans->delayed_refs.dirty_extent_root;
4249 rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
4250 ulist_free(entry->old_roots);
4251 kfree(entry);
4252 }
4253}