// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/btrfs.h>
#include <linux/sizes.h>

#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
#include "backref.h"
#include "extent_io.h"
#include "qgroup.h"

/* TODO XXX FIXME
 *  - subvol delete -> delete when ref goes to 0? delete limits also?
 *  - reorganize keys
 *  - compressed
 *  - sync
 *  - copy also limits on subvol creation
 *  - limit
 *  - caches for ulists
 *  - performance benchmarks
 *  - check all ioctl parameters
 */

/*
 * Helpers to access qgroup reservation
 *
 * Callers should ensure the lock context and type are valid
 */

static u64 qgroup_rsv_total(const struct btrfs_qgroup *qgroup)
{
	u64 ret = 0;
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		ret += qgroup->rsv.values[i];

	return ret;
}

#ifdef CONFIG_BTRFS_DEBUG
static const char *qgroup_rsv_type_str(enum btrfs_qgroup_rsv_type type)
{
	if (type == BTRFS_QGROUP_RSV_DATA)
		return "data";
	if (type == BTRFS_QGROUP_RSV_META_PERTRANS)
		return "meta_pertrans";
	if (type == BTRFS_QGROUP_RSV_META_PREALLOC)
		return "meta_prealloc";
	return NULL;
}
#endif

static void qgroup_rsv_add(struct btrfs_fs_info *fs_info,
			   struct btrfs_qgroup *qgroup, u64 num_bytes,
			   enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, num_bytes, type);
	qgroup->rsv.values[type] += num_bytes;
}

static void qgroup_rsv_release(struct btrfs_fs_info *fs_info,
			       struct btrfs_qgroup *qgroup, u64 num_bytes,
			       enum btrfs_qgroup_rsv_type type)
{
	trace_qgroup_update_reserve(fs_info, qgroup, -(s64)num_bytes, type);
	if (qgroup->rsv.values[type] >= num_bytes) {
		qgroup->rsv.values[type] -= num_bytes;
		return;
	}
#ifdef CONFIG_BTRFS_DEBUG
	WARN_RATELIMIT(1,
		"qgroup %llu %s reserved space underflow, have %llu to free %llu",
		qgroup->qgroupid, qgroup_rsv_type_str(type),
		qgroup->rsv.values[type], num_bytes);
#endif
	qgroup->rsv.values[type] = 0;
}

static void qgroup_rsv_add_by_qgroup(struct btrfs_fs_info *fs_info,
				     struct btrfs_qgroup *dest,
				     struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_add(fs_info, dest, src->rsv.values[i], i);
}

static void qgroup_rsv_release_by_qgroup(struct btrfs_fs_info *fs_info,
					 struct btrfs_qgroup *dest,
					 struct btrfs_qgroup *src)
{
	int i;

	for (i = 0; i < BTRFS_QGROUP_RSV_LAST; i++)
		qgroup_rsv_release(fs_info, dest, src->rsv.values[i], i);
}

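/*
 * Per accounting-round reference count helpers.
 *
 * old_refcnt and new_refcnt are stored as (seq + count): a stored value
 * below the current sequence number is stale from an earlier round and
 * is treated as zero, so the counters never need an explicit reset
 * between rounds.
 */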
static void btrfs_qgroup_update_old_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->old_refcnt < seq)
		qg->old_refcnt = seq;
	qg->old_refcnt += mod;
}

static void btrfs_qgroup_update_new_refcnt(struct btrfs_qgroup *qg, u64 seq,
					   int mod)
{
	if (qg->new_refcnt < seq)
		qg->new_refcnt = seq;
	qg->new_refcnt += mod;
}

static inline u64 btrfs_qgroup_get_old_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->old_refcnt < seq)
		return 0;
	return qg->old_refcnt - seq;
}

static inline u64 btrfs_qgroup_get_new_refcnt(struct btrfs_qgroup *qg, u64 seq)
{
	if (qg->new_refcnt < seq)
		return 0;
	return qg->new_refcnt - seq;
}

/*
 * Glue structure to represent the relations between qgroups. Each relation
 * is linked into two lists: next_group chains it into member->groups, and
 * next_member chains it into group->members.
 */
struct btrfs_qgroup_list {
	struct list_head next_group;
	struct list_head next_member;
	struct btrfs_qgroup *group;
	struct btrfs_qgroup *member;
};

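/*
 * ulist aux values carry btrfs_qgroup pointers through the accounting
 * walks; these two helpers centralize the u64 <-> pointer casts.
 */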
static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
	return (u64)(uintptr_t)qg;
}

static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
	return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}

static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
		   int init_flags);
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);

/* must be called with qgroup_ioctl_lock held */
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
					   u64 qgroupid)
{
	struct rb_node *n = fs_info->qgroup_tree.rb_node;
	struct btrfs_qgroup *qgroup;

	while (n) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		if (qgroup->qgroupid < qgroupid)
			n = n->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			n = n->rb_right;
		else
			return qgroup;
	}
	return NULL;
}

/* must be called with qgroup_lock held */
static struct btrfs_qgroup *add_qgroup_rb(struct btrfs_fs_info *fs_info,
					  u64 qgroupid)
{
	struct rb_node **p = &fs_info->qgroup_tree.rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_qgroup *qgroup;

	while (*p) {
		parent = *p;
		qgroup = rb_entry(parent, struct btrfs_qgroup, node);

		if (qgroup->qgroupid < qgroupid)
			p = &(*p)->rb_left;
		else if (qgroup->qgroupid > qgroupid)
			p = &(*p)->rb_right;
		else
			return qgroup;
	}

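	/* GFP_ATOMIC: callers hold the fs_info->qgroup_lock spinlock. */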
	qgroup = kzalloc(sizeof(*qgroup), GFP_ATOMIC);
	if (!qgroup)
		return ERR_PTR(-ENOMEM);

	qgroup->qgroupid = qgroupid;
	INIT_LIST_HEAD(&qgroup->groups);
	INIT_LIST_HEAD(&qgroup->members);
	INIT_LIST_HEAD(&qgroup->dirty);

	rb_link_node(&qgroup->node, parent, p);
	rb_insert_color(&qgroup->node, &fs_info->qgroup_tree);

	return qgroup;
}

static void __del_qgroup_rb(struct btrfs_qgroup *qgroup)
{
	struct btrfs_qgroup_list *list;

	list_del(&qgroup->dirty);
	while (!list_empty(&qgroup->groups)) {
		list = list_first_entry(&qgroup->groups,
					struct btrfs_qgroup_list, next_group);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}

	while (!list_empty(&qgroup->members)) {
		list = list_first_entry(&qgroup->members,
					struct btrfs_qgroup_list, next_member);
		list_del(&list->next_group);
		list_del(&list->next_member);
		kfree(list);
	}
	kfree(qgroup);
}

/* must be called with qgroup_lock held */
static int del_qgroup_rb(struct btrfs_fs_info *fs_info, u64 qgroupid)
{
	struct btrfs_qgroup *qgroup = find_qgroup_rb(fs_info, qgroupid);

	if (!qgroup)
		return -ENOENT;

	rb_erase(&qgroup->node, &fs_info->qgroup_tree);
	__del_qgroup_rb(qgroup);
	return 0;
}

/* must be called with qgroup_lock held */
static int add_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list = kzalloc(sizeof(*list), GFP_ATOMIC);
	if (!list)
		return -ENOMEM;

	list->group = parent;
	list->member = member;
	list_add_tail(&list->next_group, &member->groups);
	list_add_tail(&list->next_member, &parent->members);

	return 0;
}

/* must be called with qgroup_lock held */
static int del_relation_rb(struct btrfs_fs_info *fs_info,
			   u64 memberid, u64 parentid)
{
	struct btrfs_qgroup *member;
	struct btrfs_qgroup *parent;
	struct btrfs_qgroup_list *list;

	member = find_qgroup_rb(fs_info, memberid);
	parent = find_qgroup_rb(fs_info, parentid);
	if (!member || !parent)
		return -ENOENT;

	list_for_each_entry(list, &member->groups, next_group) {
		if (list->group == parent) {
			list_del(&list->next_group);
			list_del(&list->next_member);
			kfree(list);
			return 0;
		}
	}
	return -ENOENT;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_verify_qgroup_counts(struct btrfs_fs_info *fs_info, u64 qgroupid,
			       u64 rfer, u64 excl)
{
	struct btrfs_qgroup *qgroup;

	qgroup = find_qgroup_rb(fs_info, qgroupid);
	if (!qgroup)
		return -EINVAL;
	if (qgroup->rfer != rfer || qgroup->excl != excl)
		return -EINVAL;
	return 0;
}
#endif

/*
 * The full config is read in one go, only called from open_ctree()
 * It doesn't use any locking, as at this point we're still single-threaded
 */
int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *quota_root = fs_info->quota_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *l;
	int slot;
	int ret = 0;
	u64 flags = 0;
	u64 rescan_progress = 0;

	if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
		return 0;

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* default this to quota off, in case no status key is found */
	fs_info->qgroup_flags = 0;

	/*
	 * pass 1: read status, all qgroup infos and limits
	 */
	key.objectid = 0;
	key.type = 0;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 1);
	if (ret)
		goto out;

	while (1) {
		struct btrfs_qgroup *qgroup;

		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type == BTRFS_QGROUP_STATUS_KEY) {
			struct btrfs_qgroup_status_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_status_item);

			if (btrfs_qgroup_status_version(l, ptr) !=
			    BTRFS_QGROUP_STATUS_VERSION) {
				btrfs_err(fs_info,
					  "old qgroup version, quota disabled");
				goto out;
			}
			if (btrfs_qgroup_status_generation(l, ptr) !=
			    fs_info->generation) {
				flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
				btrfs_err(fs_info,
					  "qgroup generation mismatch, marked as inconsistent");
			}
			fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
									  ptr);
			rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
			goto next1;
		}

		if (found_key.type != BTRFS_QGROUP_INFO_KEY &&
		    found_key.type != BTRFS_QGROUP_LIMIT_KEY)
			goto next1;

		qgroup = find_qgroup_rb(fs_info, found_key.offset);
		if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) ||
		    (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) {
			btrfs_err(fs_info, "inconsistent qgroup config");
			flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
		}
		if (!qgroup) {
			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				goto out;
			}
		}
		switch (found_key.type) {
		case BTRFS_QGROUP_INFO_KEY: {
			struct btrfs_qgroup_info_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_info_item);
			qgroup->rfer = btrfs_qgroup_info_rfer(l, ptr);
			qgroup->rfer_cmpr = btrfs_qgroup_info_rfer_cmpr(l, ptr);
			qgroup->excl = btrfs_qgroup_info_excl(l, ptr);
			qgroup->excl_cmpr = btrfs_qgroup_info_excl_cmpr(l, ptr);
			/* generation currently unused */
			break;
		}
		case BTRFS_QGROUP_LIMIT_KEY: {
			struct btrfs_qgroup_limit_item *ptr;

			ptr = btrfs_item_ptr(l, slot,
					     struct btrfs_qgroup_limit_item);
			qgroup->lim_flags = btrfs_qgroup_limit_flags(l, ptr);
			qgroup->max_rfer = btrfs_qgroup_limit_max_rfer(l, ptr);
			qgroup->max_excl = btrfs_qgroup_limit_max_excl(l, ptr);
			qgroup->rsv_rfer = btrfs_qgroup_limit_rsv_rfer(l, ptr);
			qgroup->rsv_excl = btrfs_qgroup_limit_rsv_excl(l, ptr);
			break;
		}
		}
next1:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
	btrfs_release_path(path);

	/*
	 * pass 2: read all qgroup relations
	 */
	key.objectid = 0;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = 0;
	ret = btrfs_search_slot_for_read(quota_root, &key, path, 1, 0);
	if (ret)
		goto out;
	while (1) {
		slot = path->slots[0];
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);

		if (found_key.type != BTRFS_QGROUP_RELATION_KEY)
			goto next2;

		if (found_key.objectid > found_key.offset) {
			/* parent <- member, not needed to build config */
			/* FIXME should we omit the key completely? */
			goto next2;
		}

		ret = add_relation_rb(fs_info, found_key.objectid,
				      found_key.offset);
		if (ret == -ENOENT) {
			btrfs_warn(fs_info,
				   "orphan qgroup relation 0x%llx->0x%llx",
				   found_key.objectid, found_key.offset);
			ret = 0;	/* ignore the error */
		}
		if (ret)
			goto out;
next2:
		ret = btrfs_next_item(quota_root, path);
		if (ret < 0)
			goto out;
		if (ret)
			break;
	}
out:
	fs_info->qgroup_flags |= flags;
	if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
		clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
		 ret >= 0)
		ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
	btrfs_free_path(path);

	if (ret < 0) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
	}

	return ret < 0 ? ret : 0;
}

/*
 * This is called from close_ctree() or open_ctree() or btrfs_quota_disable(),
 * the first two being single-threaded paths. For the third one, quota_root
 * has already been set to NULL with qgroup_lock held, so it is safe to clean
 * up the in-memory structures without taking qgroup_lock.
 */
void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
{
	struct rb_node *n;
	struct btrfs_qgroup *qgroup;

	while ((n = rb_first(&fs_info->qgroup_tree))) {
		qgroup = rb_entry(n, struct btrfs_qgroup, node);
		rb_erase(n, &fs_info->qgroup_tree);
		__del_qgroup_rb(qgroup);
	}
	/*
	 * We call btrfs_free_qgroup_config() both when unmounting the
	 * filesystem and when disabling quota, so set qgroup_ulist to
	 * NULL here to avoid a double free.
	 */
	ulist_free(fs_info->qgroup_ulist);
	fs_info->qgroup_ulist = NULL;
}

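/*
 * A qgroup relation item carries no payload: the key triple
 * (src, BTRFS_QGROUP_RELATION_KEY, dst) encodes the whole relation,
 * so the item is inserted with size 0.
 */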
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans, u64 src,
				    u64 dst)
{
	int ret;
	struct btrfs_root *quota_root = trans->fs_info->quota_root;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key, 0);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_relation_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *quota_root,
				    u64 src, u64 dst)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = src;
	key.type = BTRFS_QGROUP_RELATION_KEY;
	key.offset = dst;

	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

static int add_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_qgroup_info_item *qgroup_info;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	if (btrfs_is_testing(quota_root->fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;

	/*
	 * Avoid a transaction abort by catching -EEXIST here. In that
	 * case, we proceed by re-initializing the existing structure
	 * on disk.
	 */

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_info));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_info = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(leaf, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_rfer_cmpr(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl(leaf, qgroup_info, 0);
	btrfs_set_qgroup_info_excl_cmpr(leaf, qgroup_info, 0);

	btrfs_mark_buffer_dirty(leaf);

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*qgroup_limit));
	if (ret && ret != -EEXIST)
		goto out;

	leaf = path->nodes[0];
	qgroup_limit = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_max_excl(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_rfer(leaf, qgroup_limit, 0);
	btrfs_set_qgroup_limit_rsv_excl(leaf, qgroup_limit, 0);

	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int del_qgroup_item(struct btrfs_trans_handle *trans,
			   struct btrfs_root *quota_root, u64 qgroupid)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroupid;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);
	if (ret)
		goto out;

	btrfs_release_path(path);

	key.type = BTRFS_QGROUP_LIMIT_KEY;
	ret = btrfs_search_slot(trans, quota_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, quota_root, path);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_limit_item *qgroup_limit;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_LIMIT_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_limit = btrfs_item_ptr(l, slot, struct btrfs_qgroup_limit_item);
	btrfs_set_qgroup_limit_flags(l, qgroup_limit, qgroup->lim_flags);
	btrfs_set_qgroup_limit_max_rfer(l, qgroup_limit, qgroup->max_rfer);
	btrfs_set_qgroup_limit_max_excl(l, qgroup_limit, qgroup->max_excl);
	btrfs_set_qgroup_limit_rsv_rfer(l, qgroup_limit, qgroup->rsv_rfer);
	btrfs_set_qgroup_limit_rsv_excl(l, qgroup_limit, qgroup->rsv_excl);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_qgroup *qgroup)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_info_item *qgroup_info;
	int ret;
	int slot;

	if (btrfs_is_testing(root->fs_info))
		return 0;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_INFO_KEY;
	key.offset = qgroup->qgroupid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	qgroup_info = btrfs_item_ptr(l, slot, struct btrfs_qgroup_info_item);
	btrfs_set_qgroup_info_generation(l, qgroup_info, trans->transid);
	btrfs_set_qgroup_info_rfer(l, qgroup_info, qgroup->rfer);
	btrfs_set_qgroup_info_rfer_cmpr(l, qgroup_info, qgroup->rfer_cmpr);
	btrfs_set_qgroup_info_excl(l, qgroup_info, qgroup->excl);
	btrfs_set_qgroup_info_excl_cmpr(l, qgroup_info, qgroup->excl_cmpr);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info,
				     struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *l;
	struct btrfs_qgroup_status_item *ptr;
	int ret;
	int slot;

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;

	if (ret)
		goto out;

	l = path->nodes[0];
	slot = path->slots[0];
	ptr = btrfs_item_ptr(l, slot, struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_flags(l, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_generation(l, ptr, trans->transid);
	btrfs_set_qgroup_status_rescan(l, ptr,
				fs_info->qgroup_rescan_progress.objectid);

	btrfs_mark_buffer_dirty(l);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * called with qgroup_lock held
 */
static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf = NULL;
	int ret;
	int nr = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;

	key.objectid = 0;
	key.offset = 0;
	key.type = 0;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		nr = btrfs_header_nritems(leaf);
		if (!nr)
			break;
		/*
		 * Delete the leaves one by one, since the whole tree
		 * is going to be deleted.
		 */
		path->slots[0] = 0;
		ret = btrfs_del_items(trans, root, path, 0, nr);
		if (ret)
			goto out;

		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_quota_enable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path = NULL;
	struct btrfs_qgroup_status_item *ptr;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_qgroup *qgroup = NULL;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;
	int slot;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (fs_info->quota_root)
		goto out;

	/*
	 * 1 for quota root item
	 * 1 for BTRFS_QGROUP_STATUS item
	 *
	 * We also need 2*n items for QGROUP_INFO/QGROUP_LIMIT items
	 * per subvolume. However those are not currently reserved since
	 * that would be a lot of overkill.
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	fs_info->qgroup_ulist = ulist_alloc(GFP_KERNEL);
	if (!fs_info->qgroup_ulist) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * initially create the quota tree
	 */
	quota_root = btrfs_create_tree(trans, fs_info,
				       BTRFS_QUOTA_TREE_OBJECTID);
	if (IS_ERR(quota_root)) {
		ret = PTR_ERR(quota_root);
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		btrfs_abort_transaction(trans, ret);
		goto out_free_root;
	}

	key.objectid = 0;
	key.type = BTRFS_QGROUP_STATUS_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
				      sizeof(*ptr));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_qgroup_status_item);
	btrfs_set_qgroup_status_generation(leaf, ptr, trans->transid);
	btrfs_set_qgroup_status_version(leaf, ptr, BTRFS_QGROUP_STATUS_VERSION);
	fs_info->qgroup_flags = BTRFS_QGROUP_STATUS_FLAG_ON |
				BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
	btrfs_set_qgroup_status_flags(leaf, ptr, fs_info->qgroup_flags);
	btrfs_set_qgroup_status_rescan(leaf, ptr, 0);

	btrfs_mark_buffer_dirty(leaf);

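	/*
	 * Walk all ROOT_REF items in the tree root so that every existing
	 * subvolume gets a qgroup item created for it.
	 */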
	key.objectid = 0;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = 0;

	btrfs_release_path(path);
	ret = btrfs_search_slot_for_read(tree_root, &key, path, 1, 0);
	if (ret > 0)
		goto out_add_root;
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	while (1) {
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.type == BTRFS_ROOT_REF_KEY) {
			ret = add_qgroup_item(trans, quota_root,
					      found_key.offset);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}

			qgroup = add_qgroup_rb(fs_info, found_key.offset);
			if (IS_ERR(qgroup)) {
				ret = PTR_ERR(qgroup);
				btrfs_abort_transaction(trans, ret);
				goto out_free_path;
			}
		}
		ret = btrfs_next_item(tree_root, path);
		if (ret < 0) {
			btrfs_abort_transaction(trans, ret);
			goto out_free_path;
		}
		if (ret)
			break;
	}

out_add_root:
	btrfs_release_path(path);
	ret = add_qgroup_item(trans, quota_root, BTRFS_FS_TREE_OBJECTID);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	qgroup = add_qgroup_rb(fs_info, BTRFS_FS_TREE_OBJECTID);
	if (IS_ERR(qgroup)) {
		ret = PTR_ERR(qgroup);
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}
	spin_lock(&fs_info->qgroup_lock);
	fs_info->quota_root = quota_root;
	set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	spin_unlock(&fs_info->qgroup_lock);

	ret = btrfs_commit_transaction(trans);
	if (ret) {
		trans = NULL;
		goto out_free_path;
	}

	ret = qgroup_rescan_init(fs_info, 0, 1);
	if (!ret) {
		qgroup_rescan_zero_tracking(fs_info);
		btrfs_queue_work(fs_info->qgroup_rescan_workers,
				 &fs_info->qgroup_rescan_work);
	}

out_free_path:
	btrfs_free_path(path);
out_free_root:
	if (ret) {
		free_extent_buffer(quota_root->node);
		free_extent_buffer(quota_root->commit_root);
		kfree(quota_root);
	}
out:
	if (ret) {
		ulist_free(fs_info->qgroup_ulist);
		fs_info->qgroup_ulist = NULL;
		if (trans)
			btrfs_end_transaction(trans);
	}
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *quota_root;
	struct btrfs_trans_handle *trans = NULL;
	int ret = 0;

	mutex_lock(&fs_info->qgroup_ioctl_lock);
	if (!fs_info->quota_root)
		goto out;

	/*
	 * 1 For the root item
	 *
	 * We should also reserve enough items for the quota tree deletion in
	 * btrfs_clean_quota_tree but this is not done.
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 1);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
	btrfs_qgroup_wait_for_completion(fs_info, false);
	spin_lock(&fs_info->qgroup_lock);
	quota_root = fs_info->quota_root;
	fs_info->quota_root = NULL;
	fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON;
	spin_unlock(&fs_info->qgroup_lock);

	btrfs_free_qgroup_config(fs_info);

	ret = btrfs_clean_quota_tree(trans, quota_root);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	ret = btrfs_del_root(trans, fs_info, &quota_root->root_key);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto end_trans;
	}

	list_del(&quota_root->dirty_list);

	btrfs_tree_lock(quota_root->node);
	clean_tree_block(fs_info, quota_root->node);
	btrfs_tree_unlock(quota_root->node);
	btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);

	free_extent_buffer(quota_root->node);
	free_extent_buffer(quota_root->commit_root);
	kfree(quota_root);

end_trans:
	ret = btrfs_end_transaction(trans);
out:
	mutex_unlock(&fs_info->qgroup_ioctl_lock);
	return ret;
}

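/*
 * Queue a qgroup so its on-disk items get written back when the
 * transaction commits (see btrfs_run_qgroups()); the list_empty()
 * check keeps it from being queued twice.
 */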
static void qgroup_dirty(struct btrfs_fs_info *fs_info,
			 struct btrfs_qgroup *qgroup)
{
	if (list_empty(&qgroup->dirty))
		list_add(&qgroup->dirty, &fs_info->dirty_qgroups);
}

/*
 * The easy accounting path: update a qgroup relationship whose child qgroup
 * only has exclusive extents.
 *
 * In this case, all exclusive extents are also exclusive for the parent, so
 * excl/rfer just get added/removed.
 *
 * The same goes for qgroup reservation space, which must also be added to or
 * removed from the parent; otherwise, when the child later releases
 * reservation space, the parent would underflow its reservation (in the
 * relationship-adding case).
 *
 * Caller should hold fs_info->qgroup_lock.
 */
static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
				    struct ulist *tmp, u64 ref_root,
				    struct btrfs_qgroup *src, int sign)
{
	struct btrfs_qgroup *qgroup;
	struct btrfs_qgroup_list *glist;
	struct ulist_node *unode;
	struct ulist_iterator uiter;
	u64 num_bytes = src->excl;
	int ret = 0;

	qgroup = find_qgroup_rb(fs_info, ref_root);
	if (!qgroup)
		goto out;

	qgroup->rfer += sign * num_bytes;
	qgroup->rfer_cmpr += sign * num_bytes;

	WARN_ON(sign < 0 && qgroup->excl < num_bytes);
	qgroup->excl += sign * num_bytes;
	qgroup->excl_cmpr += sign * num_bytes;

	if (sign > 0)
		qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
	else
		qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);

	qgroup_dirty(fs_info, qgroup);

	/* Get all of the parent groups that contain this qgroup */
	list_for_each_entry(glist, &qgroup->groups, next_group) {
		ret = ulist_add(tmp, glist->group->qgroupid,
				qgroup_to_aux(glist->group), GFP_ATOMIC);
		if (ret < 0)
			goto out;
	}

	/* Iterate all of the parents and adjust their reference counts */
	ULIST_ITER_INIT(&uiter);
	while ((unode = ulist_next(tmp, &uiter))) {
		qgroup = unode_aux_to_qgroup(unode);
		qgroup->rfer += sign * num_bytes;
		qgroup->rfer_cmpr += sign * num_bytes;
		WARN_ON(sign < 0 && qgroup->excl < num_bytes);
		qgroup->excl += sign * num_bytes;
		if (sign > 0)
			qgroup_rsv_add_by_qgroup(fs_info, qgroup, src);
		else
			qgroup_rsv_release_by_qgroup(fs_info, qgroup, src);
		qgroup->excl_cmpr += sign * num_bytes;
		qgroup_dirty(fs_info, qgroup);

		/* Add any parents of the parents */
		list_for_each_entry(glist, &qgroup->groups, next_group) {
			ret = ulist_add(tmp, glist->group->qgroupid,
					qgroup_to_aux(glist->group), GFP_ATOMIC);
			if (ret < 0)
				goto out;
		}
	}
	ret = 0;
out:
	return ret;
}


| 1201 | /* |
| 1202 | * Quick path for updating qgroup with only excl refs. |
| 1203 | * |
| 1204 | * In that case, updating all parents is enough. |
| 1205 | * Otherwise we need to do a full rescan. |
| 1206 | * Caller should also hold fs_info->qgroup_lock. |
| 1207 | * |
| 1208 | * Return 0 for a successful quick update, >0 if a full rescan is needed |
| 1209 | * (in which case the INCONSISTENT flag is set), |
| 1210 | * and <0 for other errors. |
| 1211 | */ |
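| | /* |
| | * Illustrative note (not in the original source): the quick path only |
| | * fires when all of the child's extents are exclusive, i.e. excl == rfer. |
| | * E.g. a freshly created subvolume qgroup with rfer == excl == 16 KiB |
| | * qualifies; a qgroup sharing extents with a snapshot (rfer > excl) does |
| | * not, so the status is marked INCONSISTENT and a rescan is required. |
| | */ |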
| 1212 | static int quick_update_accounting(struct btrfs_fs_info *fs_info, |
| 1213 | struct ulist *tmp, u64 src, u64 dst, |
| 1214 | int sign) |
| 1215 | { |
| 1216 | struct btrfs_qgroup *qgroup; |
| 1217 | int ret = 1; |
| 1218 | int err = 0; |
| 1219 | |
| 1220 | qgroup = find_qgroup_rb(fs_info, src); |
| 1221 | if (!qgroup) |
| 1222 | goto out; |
| 1223 | if (qgroup->excl == qgroup->rfer) { |
| 1224 | ret = 0; |
| 1225 | err = __qgroup_excl_accounting(fs_info, tmp, dst, |
Qu Wenruo | 429d627 | 2017-12-12 15:34:26 +0800 | [diff] [blame] | 1226 | qgroup, sign); |
Qu Wenruo | 9c8b35b | 2015-02-27 16:24:27 +0800 | [diff] [blame] | 1227 | if (err < 0) { |
| 1228 | ret = err; |
| 1229 | goto out; |
| 1230 | } |
| 1231 | } |
| 1232 | out: |
| 1233 | if (ret) |
| 1234 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; |
| 1235 | return ret; |
| 1236 | } |
| 1237 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1238 | int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans, |
| 1239 | struct btrfs_fs_info *fs_info, u64 src, u64 dst) |
| 1240 | { |
| 1241 | struct btrfs_root *quota_root; |
Wang Shilong | b7fef4f | 2013-04-07 10:50:18 +0000 | [diff] [blame] | 1242 | struct btrfs_qgroup *parent; |
| 1243 | struct btrfs_qgroup *member; |
Wang Shilong | 534e662 | 2013-04-17 14:49:51 +0000 | [diff] [blame] | 1244 | struct btrfs_qgroup_list *list; |
Qu Wenruo | 9c8b35b | 2015-02-27 16:24:27 +0800 | [diff] [blame] | 1245 | struct ulist *tmp; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1246 | int ret = 0; |
| 1247 | |
Qu Wenruo | 8465ece | 2015-02-27 16:24:22 +0800 | [diff] [blame] | 1248 | /* Check the level of src and dst first */ |
| 1249 | if (btrfs_qgroup_level(src) >= btrfs_qgroup_level(dst)) |
| 1250 | return -EINVAL; |
| 1251 | |
David Sterba | 6602caf | 2017-02-13 12:41:02 +0100 | [diff] [blame] | 1252 | tmp = ulist_alloc(GFP_KERNEL); |
Christian Engelmayer | ab3680d | 2015-05-02 17:19:55 +0200 | [diff] [blame] | 1253 | if (!tmp) |
| 1254 | return -ENOMEM; |
| 1255 | |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1256 | mutex_lock(&fs_info->qgroup_ioctl_lock); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1257 | quota_root = fs_info->quota_root; |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1258 | if (!quota_root) { |
| 1259 | ret = -EINVAL; |
| 1260 | goto out; |
| 1261 | } |
Wang Shilong | b7fef4f | 2013-04-07 10:50:18 +0000 | [diff] [blame] | 1262 | member = find_qgroup_rb(fs_info, src); |
| 1263 | parent = find_qgroup_rb(fs_info, dst); |
| 1264 | if (!member || !parent) { |
| 1265 | ret = -EINVAL; |
| 1266 | goto out; |
| 1267 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1268 | |
Wang Shilong | 534e662 | 2013-04-17 14:49:51 +0000 | [diff] [blame] | 1269 | /* check if such a qgroup relation already exists */ |
| 1270 | list_for_each_entry(list, &member->groups, next_group) { |
| 1271 | if (list->group == parent) { |
| 1272 | ret = -EEXIST; |
| 1273 | goto out; |
| 1274 | } |
| 1275 | } |
| 1276 | |
Lu Fengqi | 711169c | 2018-07-18 14:45:24 +0800 | [diff] [blame^] | 1277 | ret = add_qgroup_relation_item(trans, src, dst); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1278 | if (ret) |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1279 | goto out; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1280 | |
Lu Fengqi | 711169c | 2018-07-18 14:45:24 +0800 | [diff] [blame^] | 1281 | ret = add_qgroup_relation_item(trans, dst, src); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1282 | if (ret) { |
| 1283 | del_qgroup_relation_item(trans, quota_root, src, dst); |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1284 | goto out; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1285 | } |
| 1286 | |
| 1287 | spin_lock(&fs_info->qgroup_lock); |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1288 | ret = add_relation_rb(fs_info, src, dst); |
Qu Wenruo | 9c8b35b | 2015-02-27 16:24:27 +0800 | [diff] [blame] | 1289 | if (ret < 0) { |
| 1290 | spin_unlock(&fs_info->qgroup_lock); |
| 1291 | goto out; |
| 1292 | } |
| 1293 | ret = quick_update_accounting(fs_info, tmp, src, dst, 1); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1294 | spin_unlock(&fs_info->qgroup_lock); |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1295 | out: |
| 1296 | mutex_unlock(&fs_info->qgroup_ioctl_lock); |
Qu Wenruo | 9c8b35b | 2015-02-27 16:24:27 +0800 | [diff] [blame] | 1297 | ulist_free(tmp); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1298 | return ret; |
| 1299 | } |
| 1300 | |
David Sterba | 025db91 | 2017-02-13 13:00:51 +0100 | [diff] [blame] | 1301 | static int __del_qgroup_relation(struct btrfs_trans_handle *trans, |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1302 | struct btrfs_fs_info *fs_info, u64 src, u64 dst) |
| 1303 | { |
| 1304 | struct btrfs_root *quota_root; |
Wang Shilong | 534e662 | 2013-04-17 14:49:51 +0000 | [diff] [blame] | 1305 | struct btrfs_qgroup *parent; |
| 1306 | struct btrfs_qgroup *member; |
| 1307 | struct btrfs_qgroup_list *list; |
Qu Wenruo | 9c8b35b | 2015-02-27 16:24:27 +0800 | [diff] [blame] | 1308 | struct ulist *tmp; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1309 | int ret = 0; |
| 1310 | int err; |
| 1311 | |
David Sterba | 6602caf | 2017-02-13 12:41:02 +0100 | [diff] [blame] | 1312 | tmp = ulist_alloc(GFP_KERNEL); |
Qu Wenruo | 9c8b35b | 2015-02-27 16:24:27 +0800 | [diff] [blame] | 1313 | if (!tmp) |
| 1314 | return -ENOMEM; |
| 1315 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1316 | quota_root = fs_info->quota_root; |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1317 | if (!quota_root) { |
| 1318 | ret = -EINVAL; |
| 1319 | goto out; |
| 1320 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1321 | |
Wang Shilong | 534e662 | 2013-04-17 14:49:51 +0000 | [diff] [blame] | 1322 | member = find_qgroup_rb(fs_info, src); |
| 1323 | parent = find_qgroup_rb(fs_info, dst); |
| 1324 | if (!member || !parent) { |
| 1325 | ret = -EINVAL; |
| 1326 | goto out; |
| 1327 | } |
| 1328 | |
| 1329 | /* check if such a qgroup relation already exists */ |
| 1330 | list_for_each_entry(list, &member->groups, next_group) { |
| 1331 | if (list->group == parent) |
| 1332 | goto exist; |
| 1333 | } |
| 1334 | ret = -ENOENT; |
| 1335 | goto out; |
| 1336 | exist: |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1337 | ret = del_qgroup_relation_item(trans, quota_root, src, dst); |
| 1338 | err = del_qgroup_relation_item(trans, quota_root, dst, src); |
| 1339 | if (err && !ret) |
| 1340 | ret = err; |
| 1341 | |
| 1342 | spin_lock(&fs_info->qgroup_lock); |
| 1343 | del_relation_rb(fs_info, src, dst); |
Qu Wenruo | 9c8b35b | 2015-02-27 16:24:27 +0800 | [diff] [blame] | 1344 | ret = quick_update_accounting(fs_info, tmp, src, dst, -1); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1345 | spin_unlock(&fs_info->qgroup_lock); |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1346 | out: |
Qu Wenruo | 9c8b35b | 2015-02-27 16:24:27 +0800 | [diff] [blame] | 1347 | ulist_free(tmp); |
Dongsheng Yang | f5a6b1c | 2014-11-24 10:27:09 -0500 | [diff] [blame] | 1348 | return ret; |
| 1349 | } |
| 1350 | |
| 1351 | int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans, |
| 1352 | struct btrfs_fs_info *fs_info, u64 src, u64 dst) |
| 1353 | { |
| 1354 | int ret = 0; |
| 1355 | |
| 1356 | mutex_lock(&fs_info->qgroup_ioctl_lock); |
| 1357 | ret = __del_qgroup_relation(trans, fs_info, src, dst); |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1358 | mutex_unlock(&fs_info->qgroup_ioctl_lock); |
Dongsheng Yang | f5a6b1c | 2014-11-24 10:27:09 -0500 | [diff] [blame] | 1359 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1360 | return ret; |
| 1361 | } |
| 1362 | |
| 1363 | int btrfs_create_qgroup(struct btrfs_trans_handle *trans, |
Dongsheng Yang | 4087cf2 | 2015-01-18 10:59:23 -0500 | [diff] [blame] | 1364 | struct btrfs_fs_info *fs_info, u64 qgroupid) |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1365 | { |
| 1366 | struct btrfs_root *quota_root; |
| 1367 | struct btrfs_qgroup *qgroup; |
| 1368 | int ret = 0; |
| 1369 | |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1370 | mutex_lock(&fs_info->qgroup_ioctl_lock); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1371 | quota_root = fs_info->quota_root; |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1372 | if (!quota_root) { |
| 1373 | ret = -EINVAL; |
| 1374 | goto out; |
| 1375 | } |
Wang Shilong | 534e662 | 2013-04-17 14:49:51 +0000 | [diff] [blame] | 1376 | qgroup = find_qgroup_rb(fs_info, qgroupid); |
| 1377 | if (qgroup) { |
| 1378 | ret = -EEXIST; |
| 1379 | goto out; |
| 1380 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1381 | |
| 1382 | ret = add_qgroup_item(trans, quota_root, qgroupid); |
Wang Shilong | 534e662 | 2013-04-17 14:49:51 +0000 | [diff] [blame] | 1383 | if (ret) |
| 1384 | goto out; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1385 | |
| 1386 | spin_lock(&fs_info->qgroup_lock); |
| 1387 | qgroup = add_qgroup_rb(fs_info, qgroupid); |
| 1388 | spin_unlock(&fs_info->qgroup_lock); |
| 1389 | |
| 1390 | if (IS_ERR(qgroup)) |
| 1391 | ret = PTR_ERR(qgroup); |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1392 | out: |
| 1393 | mutex_unlock(&fs_info->qgroup_ioctl_lock); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1394 | return ret; |
| 1395 | } |
| 1396 | |
| 1397 | int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, |
| 1398 | struct btrfs_fs_info *fs_info, u64 qgroupid) |
| 1399 | { |
| 1400 | struct btrfs_root *quota_root; |
Arne Jansen | 2cf6870 | 2013-01-17 01:22:09 -0700 | [diff] [blame] | 1401 | struct btrfs_qgroup *qgroup; |
Dongsheng Yang | f5a6b1c | 2014-11-24 10:27:09 -0500 | [diff] [blame] | 1402 | struct btrfs_qgroup_list *list; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1403 | int ret = 0; |
| 1404 | |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1405 | mutex_lock(&fs_info->qgroup_ioctl_lock); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1406 | quota_root = fs_info->quota_root; |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1407 | if (!quota_root) { |
| 1408 | ret = -EINVAL; |
| 1409 | goto out; |
| 1410 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1411 | |
Arne Jansen | 2cf6870 | 2013-01-17 01:22:09 -0700 | [diff] [blame] | 1412 | qgroup = find_qgroup_rb(fs_info, qgroupid); |
Wang Shilong | 534e662 | 2013-04-17 14:49:51 +0000 | [diff] [blame] | 1413 | if (!qgroup) { |
| 1414 | ret = -ENOENT; |
| 1415 | goto out; |
| 1416 | } else { |
Dongsheng Yang | f5a6b1c | 2014-11-24 10:27:09 -0500 | [diff] [blame] | 1417 | /* check if there are no children of this qgroup */ |
| 1418 | if (!list_empty(&qgroup->members)) { |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1419 | ret = -EBUSY; |
| 1420 | goto out; |
Arne Jansen | 2cf6870 | 2013-01-17 01:22:09 -0700 | [diff] [blame] | 1421 | } |
| 1422 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1423 | ret = del_qgroup_item(trans, quota_root, qgroupid); |
Sargun Dhillon | 36b96fd | 2017-09-17 09:02:29 +0000 | [diff] [blame] | 1424 | if (ret && ret != -ENOENT) |
| 1425 | goto out; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1426 | |
Dongsheng Yang | f5a6b1c | 2014-11-24 10:27:09 -0500 | [diff] [blame] | 1427 | while (!list_empty(&qgroup->groups)) { |
| 1428 | list = list_first_entry(&qgroup->groups, |
| 1429 | struct btrfs_qgroup_list, next_group); |
| 1430 | ret = __del_qgroup_relation(trans, fs_info, |
| 1431 | qgroupid, |
| 1432 | list->group->qgroupid); |
| 1433 | if (ret) |
| 1434 | goto out; |
| 1435 | } |
| 1436 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1437 | spin_lock(&fs_info->qgroup_lock); |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1438 | del_qgroup_rb(fs_info, qgroupid); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1439 | spin_unlock(&fs_info->qgroup_lock); |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1440 | out: |
| 1441 | mutex_unlock(&fs_info->qgroup_ioctl_lock); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1442 | return ret; |
| 1443 | } |
| 1444 | |
| 1445 | int btrfs_limit_qgroup(struct btrfs_trans_handle *trans, |
| 1446 | struct btrfs_fs_info *fs_info, u64 qgroupid, |
| 1447 | struct btrfs_qgroup_limit *limit) |
| 1448 | { |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1449 | struct btrfs_root *quota_root; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1450 | struct btrfs_qgroup *qgroup; |
| 1451 | int ret = 0; |
Yang Dongsheng | fe75990 | 2015-06-03 14:57:32 +0800 | [diff] [blame] | 1452 | /* Sometimes we want to clear the limit on this qgroup. |
| 1453 | * To meet this requirement, we treat -1 as a special value |
| 1454 | * which tells the kernel to clear the limit on this qgroup. |
| 1455 | */ |
| 1456 | const u64 CLEAR_VALUE = -1; |
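| | /* |
| | * Illustrative note (not in the original source): e.g. an ioctl caller |
| | * setting BTRFS_QGROUP_LIMIT_MAX_RFER in limit->flags together with |
| | * limit->max_rfer == (u64)-1 gets the max_rfer limit cleared below, |
| | * rather than set to U64_MAX. |
| | */ |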
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1457 | |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1458 | mutex_lock(&fs_info->qgroup_ioctl_lock); |
| 1459 | quota_root = fs_info->quota_root; |
| 1460 | if (!quota_root) { |
| 1461 | ret = -EINVAL; |
| 1462 | goto out; |
| 1463 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1464 | |
Wang Shilong | ddb47af | 2013-04-07 10:50:20 +0000 | [diff] [blame] | 1465 | qgroup = find_qgroup_rb(fs_info, qgroupid); |
| 1466 | if (!qgroup) { |
| 1467 | ret = -ENOENT; |
| 1468 | goto out; |
| 1469 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1470 | |
Wang Shilong | 58400fc | 2013-04-07 10:50:17 +0000 | [diff] [blame] | 1471 | spin_lock(&fs_info->qgroup_lock); |
Yang Dongsheng | fe75990 | 2015-06-03 14:57:32 +0800 | [diff] [blame] | 1472 | if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_RFER) { |
| 1473 | if (limit->max_rfer == CLEAR_VALUE) { |
| 1474 | qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; |
| 1475 | limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_RFER; |
| 1476 | qgroup->max_rfer = 0; |
| 1477 | } else { |
| 1478 | qgroup->max_rfer = limit->max_rfer; |
| 1479 | } |
| 1480 | } |
| 1481 | if (limit->flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) { |
| 1482 | if (limit->max_excl == CLEAR_VALUE) { |
| 1483 | qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; |
| 1484 | limit->flags &= ~BTRFS_QGROUP_LIMIT_MAX_EXCL; |
| 1485 | qgroup->max_excl = 0; |
| 1486 | } else { |
| 1487 | qgroup->max_excl = limit->max_excl; |
| 1488 | } |
| 1489 | } |
| 1490 | if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_RFER) { |
| 1491 | if (limit->rsv_rfer == CLEAR_VALUE) { |
| 1492 | qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; |
| 1493 | limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_RFER; |
| 1494 | qgroup->rsv_rfer = 0; |
| 1495 | } else { |
| 1496 | qgroup->rsv_rfer = limit->rsv_rfer; |
| 1497 | } |
| 1498 | } |
| 1499 | if (limit->flags & BTRFS_QGROUP_LIMIT_RSV_EXCL) { |
| 1500 | if (limit->rsv_excl == CLEAR_VALUE) { |
| 1501 | qgroup->lim_flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; |
| 1502 | limit->flags &= ~BTRFS_QGROUP_LIMIT_RSV_EXCL; |
| 1503 | qgroup->rsv_excl = 0; |
| 1504 | } else { |
| 1505 | qgroup->rsv_excl = limit->rsv_excl; |
| 1506 | } |
| 1507 | } |
Dongsheng Yang | 03477d9 | 2015-02-06 11:06:25 -0500 | [diff] [blame] | 1508 | qgroup->lim_flags |= limit->flags; |
| 1509 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1510 | spin_unlock(&fs_info->qgroup_lock); |
Dongsheng Yang | 1510e71 | 2014-11-20 21:01:41 -0500 | [diff] [blame] | 1511 | |
| 1512 | ret = update_qgroup_limit_item(trans, quota_root, qgroup); |
| 1513 | if (ret) { |
| 1514 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; |
| 1515 | btrfs_info(fs_info, "unable to update quota limit for %llu", |
| 1516 | qgroupid); |
| 1517 | } |
| 1518 | |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 1519 | out: |
| 1520 | mutex_unlock(&fs_info->qgroup_ioctl_lock); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 1521 | return ret; |
| 1522 | } |
Mark Fasheh | 1152651 | 2014-07-17 12:39:01 -0700 | [diff] [blame] | 1523 | |
Qu Wenruo | 50b3e04 | 2016-10-18 09:31:27 +0800 | [diff] [blame] | 1524 | int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info, |
Qu Wenruo | cb93b52 | 2016-08-15 10:36:50 +0800 | [diff] [blame] | 1525 | struct btrfs_delayed_ref_root *delayed_refs, |
| 1526 | struct btrfs_qgroup_extent_record *record) |
Qu Wenruo | 3368d00 | 2015-04-16 14:34:17 +0800 | [diff] [blame] | 1527 | { |
| 1528 | struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node; |
| 1529 | struct rb_node *parent_node = NULL; |
| 1530 | struct btrfs_qgroup_extent_record *entry; |
| 1531 | u64 bytenr = record->bytenr; |
| 1532 | |
David Sterba | a4666e6 | 2018-03-16 02:21:22 +0100 | [diff] [blame] | 1533 | lockdep_assert_held(&delayed_refs->lock); |
Qu Wenruo | 50b3e04 | 2016-10-18 09:31:27 +0800 | [diff] [blame] | 1534 | trace_btrfs_qgroup_trace_extent(fs_info, record); |
Mark Fasheh | 82bd101 | 2015-11-05 14:38:00 -0800 | [diff] [blame] | 1535 | |
Qu Wenruo | 3368d00 | 2015-04-16 14:34:17 +0800 | [diff] [blame] | 1536 | while (*p) { |
| 1537 | parent_node = *p; |
| 1538 | entry = rb_entry(parent_node, struct btrfs_qgroup_extent_record, |
| 1539 | node); |
| 1540 | if (bytenr < entry->bytenr) |
| 1541 | p = &(*p)->rb_left; |
| 1542 | else if (bytenr > entry->bytenr) |
| 1543 | p = &(*p)->rb_right; |
| 1544 | else |
Qu Wenruo | cb93b52 | 2016-08-15 10:36:50 +0800 | [diff] [blame] | 1545 | return 1; |
Qu Wenruo | 3368d00 | 2015-04-16 14:34:17 +0800 | [diff] [blame] | 1546 | } |
| 1547 | |
| 1548 | rb_link_node(&record->node, parent_node, p); |
| 1549 | rb_insert_color(&record->node, &delayed_refs->dirty_extent_root); |
Qu Wenruo | cb93b52 | 2016-08-15 10:36:50 +0800 | [diff] [blame] | 1550 | return 0; |
| 1551 | } |
| 1552 | |
Qu Wenruo | fb235dc | 2017-02-15 10:43:03 +0800 | [diff] [blame] | 1553 | int btrfs_qgroup_trace_extent_post(struct btrfs_fs_info *fs_info, |
| 1554 | struct btrfs_qgroup_extent_record *qrecord) |
| 1555 | { |
| 1556 | struct ulist *old_root; |
| 1557 | u64 bytenr = qrecord->bytenr; |
| 1558 | int ret; |
| 1559 | |
Zygo Blaxell | c995ab3 | 2017-09-22 13:58:45 -0400 | [diff] [blame] | 1560 | ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &old_root, false); |
Nikolay Borisov | 952bd3db | 2018-01-29 15:53:01 +0200 | [diff] [blame] | 1561 | if (ret < 0) { |
| 1562 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; |
| 1563 | btrfs_warn(fs_info, |
| 1564 | "error accounting new delayed refs extent (err code: %d), quota inconsistent", |
| 1565 | ret); |
| 1566 | return 0; |
| 1567 | } |
Qu Wenruo | fb235dc | 2017-02-15 10:43:03 +0800 | [diff] [blame] | 1568 | |
| 1569 | /* |
| 1570 | * Here we don't need to take the lock of |
| 1571 | * trans->transaction->delayed_refs, since the inserted qrecord won't |
| 1572 | * be deleted; only qrecord->node may be modified (by a new qrecord insert). |
| 1573 | * |
| 1574 | * So modifying qrecord->old_roots is safe here. |
| 1575 | */ |
| 1576 | qrecord->old_roots = old_root; |
| 1577 | return 0; |
| 1578 | } |
| 1579 | |
Qu Wenruo | 50b3e04 | 2016-10-18 09:31:27 +0800 | [diff] [blame] | 1580 | int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans, |
Qu Wenruo | cb93b52 | 2016-08-15 10:36:50 +0800 | [diff] [blame] | 1581 | struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes, |
| 1582 | gfp_t gfp_flag) |
| 1583 | { |
| 1584 | struct btrfs_qgroup_extent_record *record; |
| 1585 | struct btrfs_delayed_ref_root *delayed_refs; |
| 1586 | int ret; |
| 1587 | |
Josef Bacik | afcdd12 | 2016-09-02 15:40:02 -0400 | [diff] [blame] | 1588 | if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) |
| 1589 | || bytenr == 0 || num_bytes == 0) |
Qu Wenruo | cb93b52 | 2016-08-15 10:36:50 +0800 | [diff] [blame] | 1590 | return 0; |
| 1591 | if (WARN_ON(trans == NULL)) |
| 1592 | return -EINVAL; |
| 1593 | record = kmalloc(sizeof(*record), gfp_flag); |
| 1594 | if (!record) |
| 1595 | return -ENOMEM; |
| 1596 | |
| 1597 | delayed_refs = &trans->transaction->delayed_refs; |
| 1598 | record->bytenr = bytenr; |
| 1599 | record->num_bytes = num_bytes; |
| 1600 | record->old_roots = NULL; |
| 1601 | |
| 1602 | spin_lock(&delayed_refs->lock); |
Jeff Mahoney | 2ff7e61 | 2016-06-22 18:54:24 -0400 | [diff] [blame] | 1603 | ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record); |
Qu Wenruo | cb93b52 | 2016-08-15 10:36:50 +0800 | [diff] [blame] | 1604 | spin_unlock(&delayed_refs->lock); |
Qu Wenruo | fb235dc | 2017-02-15 10:43:03 +0800 | [diff] [blame] | 1605 | if (ret > 0) { |
Qu Wenruo | cb93b52 | 2016-08-15 10:36:50 +0800 | [diff] [blame] | 1606 | kfree(record); |
Qu Wenruo | fb235dc | 2017-02-15 10:43:03 +0800 | [diff] [blame] | 1607 | return 0; |
| 1608 | } |
| 1609 | return btrfs_qgroup_trace_extent_post(fs_info, record); |
Qu Wenruo | 3368d00 | 2015-04-16 14:34:17 +0800 | [diff] [blame] | 1610 | } |
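| | /* |
| | * Usage sketch (illustrative, not in the original source): a caller about |
| | * to change references on a tree block at @bytenr would do: |
| | * |
| | * ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr, |
| | * fs_info->nodesize, GFP_NOFS); |
| | * |
| | * Duplicate calls for the same bytenr within one transaction are cheap: |
| | * the rb-tree insert detects the existing record and the newly allocated |
| | * one is freed. |
| | */ |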
| 1611 | |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1612 | int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans, |
Jeff Mahoney | 2ff7e61 | 2016-06-22 18:54:24 -0400 | [diff] [blame] | 1613 | struct btrfs_fs_info *fs_info, |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1614 | struct extent_buffer *eb) |
| 1615 | { |
| 1616 | int nr = btrfs_header_nritems(eb); |
| 1617 | int i, extent_type, ret; |
| 1618 | struct btrfs_key key; |
| 1619 | struct btrfs_file_extent_item *fi; |
| 1620 | u64 bytenr, num_bytes; |
| 1621 | |
| 1622 | /* We can be called directly from walk_up_proc() */ |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1623 | if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1624 | return 0; |
| 1625 | |
| 1626 | for (i = 0; i < nr; i++) { |
| 1627 | btrfs_item_key_to_cpu(eb, &key, i); |
| 1628 | |
| 1629 | if (key.type != BTRFS_EXTENT_DATA_KEY) |
| 1630 | continue; |
| 1631 | |
| 1632 | fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item); |
| 1633 | /* filter out non-qgroup-accountable extents */ |
| 1634 | extent_type = btrfs_file_extent_type(eb, fi); |
| 1635 | |
| 1636 | if (extent_type == BTRFS_FILE_EXTENT_INLINE) |
| 1637 | continue; |
| 1638 | |
| 1639 | bytenr = btrfs_file_extent_disk_bytenr(eb, fi); |
| 1640 | if (!bytenr) |
| 1641 | continue; |
| 1642 | |
| 1643 | num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi); |
| 1644 | |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1645 | ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr, |
| 1646 | num_bytes, GFP_NOFS); |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1647 | if (ret) |
| 1648 | return ret; |
| 1649 | } |
Jeff Mahoney | cddf3b2 | 2017-06-20 08:15:26 -0400 | [diff] [blame] | 1650 | cond_resched(); |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1651 | return 0; |
| 1652 | } |
| 1653 | |
| 1654 | /* |
| 1655 | * Walk up the tree from the bottom, freeing leaves and any interior |
| 1656 | * nodes which have had all slots visited. If a node (leaf or |
| 1657 | * interior) is freed, the node above it will have its slot |
| 1658 | * incremented. The root node will never be freed. |
| 1659 | * |
| 1660 | * At the end of this function, we should have a path which has all |
| 1661 | * slots incremented to the next position for a search. If we need to |
| 1662 | * read a new node it will be NULL and the node above it will have the |
| 1663 | * correct slot selected for a later read. |
| 1664 | * |
| 1665 | * If we increment the root nodes slot counter past the number of |
| 1666 | * elements, 1 is returned to signal completion of the search. |
| 1667 | */ |
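| | /* |
| | * Illustrative walk-through (not in the original source): assume |
| | * root_level == 1 and the leaf at path->nodes[0] has just been fully |
| | * visited. The loop below frees that leaf, bumps path->slots[1] and |
| | * stops. If the new root slot is still below nritems, 0 is returned and |
| | * the caller walks back down into the next child; once the slot runs |
| | * past the last pointer in the root, 1 is returned to end the search. |
| | */ |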
David Sterba | 15b3451 | 2017-02-10 20:30:23 +0100 | [diff] [blame] | 1668 | static int adjust_slots_upwards(struct btrfs_path *path, int root_level) |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1669 | { |
| 1670 | int level = 0; |
| 1671 | int nr, slot; |
| 1672 | struct extent_buffer *eb; |
| 1673 | |
| 1674 | if (root_level == 0) |
| 1675 | return 1; |
| 1676 | |
| 1677 | while (level <= root_level) { |
| 1678 | eb = path->nodes[level]; |
| 1679 | nr = btrfs_header_nritems(eb); |
| 1680 | path->slots[level]++; |
| 1681 | slot = path->slots[level]; |
| 1682 | if (slot >= nr || level == 0) { |
| 1683 | /* |
| 1684 | * Don't free the root - we will detect this |
| 1685 | * condition after our loop and return a |
| 1686 | * positive value for the caller to stop walking the tree. |
| 1687 | */ |
| 1688 | if (level != root_level) { |
| 1689 | btrfs_tree_unlock_rw(eb, path->locks[level]); |
| 1690 | path->locks[level] = 0; |
| 1691 | |
| 1692 | free_extent_buffer(eb); |
| 1693 | path->nodes[level] = NULL; |
| 1694 | path->slots[level] = 0; |
| 1695 | } |
| 1696 | } else { |
| 1697 | /* |
| 1698 | * We have a valid slot to walk back down |
| 1699 | * from. Stop here so caller can process these |
| 1700 | * new nodes. |
| 1701 | */ |
| 1702 | break; |
| 1703 | } |
| 1704 | |
| 1705 | level++; |
| 1706 | } |
| 1707 | |
| 1708 | eb = path->nodes[root_level]; |
| 1709 | if (path->slots[root_level] >= btrfs_header_nritems(eb)) |
| 1710 | return 1; |
| 1711 | |
| 1712 | return 0; |
| 1713 | } |
| 1714 | |
| 1715 | int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans, |
| 1716 | struct btrfs_root *root, |
| 1717 | struct extent_buffer *root_eb, |
| 1718 | u64 root_gen, int root_level) |
| 1719 | { |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1720 | struct btrfs_fs_info *fs_info = root->fs_info; |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1721 | int ret = 0; |
| 1722 | int level; |
| 1723 | struct extent_buffer *eb = root_eb; |
| 1724 | struct btrfs_path *path = NULL; |
| 1725 | |
Nikolay Borisov | b6e6bca | 2017-07-12 09:42:19 +0300 | [diff] [blame] | 1726 | BUG_ON(root_level < 0 || root_level >= BTRFS_MAX_LEVEL); |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1727 | BUG_ON(root_eb == NULL); |
| 1728 | |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1729 | if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1730 | return 0; |
| 1731 | |
| 1732 | if (!extent_buffer_uptodate(root_eb)) { |
Qu Wenruo | 581c176 | 2018-03-29 09:08:11 +0800 | [diff] [blame] | 1733 | ret = btrfs_read_buffer(root_eb, root_gen, root_level, NULL); |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1734 | if (ret) |
| 1735 | goto out; |
| 1736 | } |
| 1737 | |
| 1738 | if (root_level == 0) { |
Jeff Mahoney | 2ff7e61 | 2016-06-22 18:54:24 -0400 | [diff] [blame] | 1739 | ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb); |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1740 | goto out; |
| 1741 | } |
| 1742 | |
| 1743 | path = btrfs_alloc_path(); |
| 1744 | if (!path) |
| 1745 | return -ENOMEM; |
| 1746 | |
| 1747 | /* |
| 1748 | * Walk down the tree. Missing extent blocks are filled in as |
| 1749 | * we go. Metadata is accounted every time we read a new |
| 1750 | * extent block. |
| 1751 | * |
| 1752 | * When we reach a leaf, we account for file extent items in it, |
| 1753 | * walk back up the tree (adjusting slot pointers as we go) |
| 1754 | * and restart the search process. |
| 1755 | */ |
| 1756 | extent_buffer_get(root_eb); /* For path */ |
| 1757 | path->nodes[root_level] = root_eb; |
| 1758 | path->slots[root_level] = 0; |
| 1759 | path->locks[root_level] = 0; /* so release_path doesn't try to unlock */ |
| 1760 | walk_down: |
| 1761 | level = root_level; |
| 1762 | while (level >= 0) { |
| 1763 | if (path->nodes[level] == NULL) { |
Qu Wenruo | 581c176 | 2018-03-29 09:08:11 +0800 | [diff] [blame] | 1764 | struct btrfs_key first_key; |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1765 | int parent_slot; |
| 1766 | u64 child_gen; |
| 1767 | u64 child_bytenr; |
| 1768 | |
| 1769 | /* |
| 1770 | * We need to get child blockptr/gen from parent before |
| 1771 | * we can read it. |
| 1772 | */ |
| 1773 | eb = path->nodes[level + 1]; |
| 1774 | parent_slot = path->slots[level + 1]; |
| 1775 | child_bytenr = btrfs_node_blockptr(eb, parent_slot); |
| 1776 | child_gen = btrfs_node_ptr_generation(eb, parent_slot); |
Qu Wenruo | 581c176 | 2018-03-29 09:08:11 +0800 | [diff] [blame] | 1777 | btrfs_node_key_to_cpu(eb, &first_key, parent_slot); |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1778 | |
Qu Wenruo | 581c176 | 2018-03-29 09:08:11 +0800 | [diff] [blame] | 1779 | eb = read_tree_block(fs_info, child_bytenr, child_gen, |
| 1780 | level, &first_key); |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1781 | if (IS_ERR(eb)) { |
| 1782 | ret = PTR_ERR(eb); |
| 1783 | goto out; |
| 1784 | } else if (!extent_buffer_uptodate(eb)) { |
| 1785 | free_extent_buffer(eb); |
| 1786 | ret = -EIO; |
| 1787 | goto out; |
| 1788 | } |
| 1789 | |
| 1790 | path->nodes[level] = eb; |
| 1791 | path->slots[level] = 0; |
| 1792 | |
| 1793 | btrfs_tree_read_lock(eb); |
| 1794 | btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); |
| 1795 | path->locks[level] = BTRFS_READ_LOCK_BLOCKING; |
| 1796 | |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 1797 | ret = btrfs_qgroup_trace_extent(trans, fs_info, |
| 1798 | child_bytenr, |
| 1799 | fs_info->nodesize, |
| 1800 | GFP_NOFS); |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1801 | if (ret) |
| 1802 | goto out; |
| 1803 | } |
| 1804 | |
| 1805 | if (level == 0) { |
Jeff Mahoney | 2ff7e61 | 2016-06-22 18:54:24 -0400 | [diff] [blame] | 1806 | ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, |
| 1807 | path->nodes[level]); |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1808 | if (ret) |
| 1809 | goto out; |
| 1810 | |
| 1811 | /* Nonzero return here means we completed our search */ |
David Sterba | 15b3451 | 2017-02-10 20:30:23 +0100 | [diff] [blame] | 1812 | ret = adjust_slots_upwards(path, root_level); |
Qu Wenruo | 33d1f05 | 2016-10-18 09:31:28 +0800 | [diff] [blame] | 1813 | if (ret) |
| 1814 | break; |
| 1815 | |
| 1816 | /* Restart search with new slots */ |
| 1817 | goto walk_down; |
| 1818 | } |
| 1819 | |
| 1820 | level--; |
| 1821 | } |
| 1822 | |
| 1823 | ret = 0; |
| 1824 | out: |
| 1825 | btrfs_free_path(path); |
| 1826 | |
| 1827 | return ret; |
| 1828 | } |
| 1829 | |
Qu Wenruo | d810ef2 | 2015-04-12 16:52:34 +0800 | [diff] [blame] | 1830 | #define UPDATE_NEW 0 |
| 1831 | #define UPDATE_OLD 1 |
| 1832 | /* |
| 1833 | * Walk all of the roots that point to the bytenr and adjust their refcnts. |
| 1834 | */ |
| 1835 | static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info, |
| 1836 | struct ulist *roots, struct ulist *tmp, |
| 1837 | struct ulist *qgroups, u64 seq, int update_old) |
| 1838 | { |
| 1839 | struct ulist_node *unode; |
| 1840 | struct ulist_iterator uiter; |
| 1841 | struct ulist_node *tmp_unode; |
| 1842 | struct ulist_iterator tmp_uiter; |
| 1843 | struct btrfs_qgroup *qg; |
| 1844 | int ret = 0; |
| 1845 | |
| 1846 | if (!roots) |
| 1847 | return 0; |
| 1848 | ULIST_ITER_INIT(&uiter); |
| 1849 | while ((unode = ulist_next(roots, &uiter))) { |
| 1850 | qg = find_qgroup_rb(fs_info, unode->val); |
| 1851 | if (!qg) |
| 1852 | continue; |
| 1853 | |
| 1854 | ulist_reinit(tmp); |
David Sterba | ef2fff6 | 2016-10-26 16:23:50 +0200 | [diff] [blame] | 1855 | ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg), |
Qu Wenruo | d810ef2 | 2015-04-12 16:52:34 +0800 | [diff] [blame] | 1856 | GFP_ATOMIC); |
| 1857 | if (ret < 0) |
| 1858 | return ret; |
David Sterba | ef2fff6 | 2016-10-26 16:23:50 +0200 | [diff] [blame] | 1859 | ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC); |
Qu Wenruo | d810ef2 | 2015-04-12 16:52:34 +0800 | [diff] [blame] | 1860 | if (ret < 0) |
| 1861 | return ret; |
| 1862 | ULIST_ITER_INIT(&tmp_uiter); |
| 1863 | while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) { |
| 1864 | struct btrfs_qgroup_list *glist; |
| 1865 | |
David Sterba | ef2fff6 | 2016-10-26 16:23:50 +0200 | [diff] [blame] | 1866 | qg = unode_aux_to_qgroup(tmp_unode); |
Qu Wenruo | d810ef2 | 2015-04-12 16:52:34 +0800 | [diff] [blame] | 1867 | if (update_old) |
| 1868 | btrfs_qgroup_update_old_refcnt(qg, seq, 1); |
| 1869 | else |
| 1870 | btrfs_qgroup_update_new_refcnt(qg, seq, 1); |
| 1871 | list_for_each_entry(glist, &qg->groups, next_group) { |
| 1872 | ret = ulist_add(qgroups, glist->group->qgroupid, |
David Sterba | ef2fff6 | 2016-10-26 16:23:50 +0200 | [diff] [blame] | 1873 | qgroup_to_aux(glist->group), |
Qu Wenruo | d810ef2 | 2015-04-12 16:52:34 +0800 | [diff] [blame] | 1874 | GFP_ATOMIC); |
| 1875 | if (ret < 0) |
| 1876 | return ret; |
| 1877 | ret = ulist_add(tmp, glist->group->qgroupid, |
David Sterba | ef2fff6 | 2016-10-26 16:23:50 +0200 | [diff] [blame] | 1878 | qgroup_to_aux(glist->group), |
Qu Wenruo | d810ef2 | 2015-04-12 16:52:34 +0800 | [diff] [blame] | 1879 | GFP_ATOMIC); |
| 1880 | if (ret < 0) |
| 1881 | return ret; |
| 1882 | } |
| 1883 | } |
| 1884 | } |
| 1885 | return 0; |
| 1886 | } |
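| | /* |
| | * Illustrative note (not in the original source): with roots = {257, 258} |
| | * and both subvolume qgroups 0/257 and 0/258 assigned to parent 1/100, |
| | * the walk above bumps the old (or new) refcnt of 0/257 and 0/258 once |
| | * each, and of 1/100 twice (once per root that reaches it). |
| | * qgroup_update_counters() then compares these per-qgroup counts against |
| | * nr_old_roots/nr_new_roots to classify the extent as shared or |
| | * exclusive. |
| | */ |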
| 1887 | |
Josef Bacik | fcebe45 | 2014-05-13 17:30:47 -0700 | [diff] [blame] | 1888 | /* |
Qu Wenruo | 823ae5b | 2015-04-12 16:59:57 +0800 | [diff] [blame] | 1889 | * Update qgroup rfer/excl counters. |
| 1890 | * Rfer update is easy, the code explains itself. |
Qu Wenruo | e69bcee | 2015-04-17 10:23:16 +0800 | [diff] [blame] | 1891 | * |
Qu Wenruo | 823ae5b | 2015-04-12 16:59:57 +0800 | [diff] [blame] | 1892 | * Excl update is tricky, so the update is split into 2 parts. |
| 1893 | * Part 1: Possible exclusive <-> sharing detect: |
| 1894 | * | A | !A | |
| 1895 | * ------------------------------------- |
| 1896 | * B | * | - | |
| 1897 | * ------------------------------------- |
| 1898 | * !B | + | ** | |
| 1899 | * ------------------------------------- |
| 1900 | * |
| 1901 | * Conditions: |
| 1902 | * A: cur_old_roots < nr_old_roots (not exclusive before) |
| 1903 | * !A: cur_old_roots == nr_old_roots (possible exclusive before) |
| 1904 | * B: cur_new_roots < nr_new_roots (not exclusive now) |
Nicholas D Steeves | 0132761 | 2016-05-19 21:18:45 -0400 | [diff] [blame] | 1905 | * !B: cur_new_roots == nr_new_roots (possible exclusive now) |
Qu Wenruo | 823ae5b | 2015-04-12 16:59:57 +0800 | [diff] [blame] | 1906 | * |
| 1907 | * Results: |
| 1908 | * +: Possible sharing -> exclusive -: Possible exclusive -> sharing |
| 1909 | * *: Definitely not changed. **: Possible unchanged. |
| 1910 | * |
| 1911 | * For !A and !B condition, the exception is cur_old/new_roots == 0 case. |
| 1912 | * |
| 1913 | * To make the logic clear, we first use conditions A and B to split the |
| 1914 | * combinations into 4 results. |
| 1915 | * |
| 1916 | * Then, for results "+" and "-", check the old/new_roots == 0 case, as |
| 1917 | * in those cases only one variant may be 0. |
| 1918 | * |
| 1919 | * Lastly, check result **; since there are 2 variants that may be 0, split |
| 1920 | * it again (2x2). |
| 1921 | * But this time we don't need to consider other things; the code and |
| 1922 | * logic are easy to understand now. |
| 1923 | */ |
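| | /* |
| | * Worked example (illustrative, not in the original source): for a 16 KiB |
| | * extent, suppose nr_old_roots == 1, nr_new_roots == 2, and for a given |
| | * qgroup cur_old_count == 1 and cur_new_count == 1. Then !A holds |
| | * (1 == 1, possibly exclusive before) and B holds (1 < 2, not exclusive |
| | * now), i.e. the "-" cell: since cur_old_count != 0, the qgroup goes |
| | * exclusive -> shared and excl/excl_cmpr drop by 16 KiB, while rfer is |
| | * untouched because the qgroup still references the extent. |
| | */ |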
| 1924 | static int qgroup_update_counters(struct btrfs_fs_info *fs_info, |
| 1925 | struct ulist *qgroups, |
| 1926 | u64 nr_old_roots, |
| 1927 | u64 nr_new_roots, |
| 1928 | u64 num_bytes, u64 seq) |
| 1929 | { |
| 1930 | struct ulist_node *unode; |
| 1931 | struct ulist_iterator uiter; |
| 1932 | struct btrfs_qgroup *qg; |
| 1933 | u64 cur_new_count, cur_old_count; |
| 1934 | |
| 1935 | ULIST_ITER_INIT(&uiter); |
| 1936 | while ((unode = ulist_next(qgroups, &uiter))) { |
| 1937 | bool dirty = false; |
| 1938 | |
David Sterba | ef2fff6 | 2016-10-26 16:23:50 +0200 | [diff] [blame] | 1939 | qg = unode_aux_to_qgroup(unode); |
Qu Wenruo | 823ae5b | 2015-04-12 16:59:57 +0800 | [diff] [blame] | 1940 | cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); |
| 1941 | cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); |
| 1942 | |
Qu Wenruo | 8b31790 | 2018-04-30 15:04:44 +0800 | [diff] [blame] | 1943 | trace_qgroup_update_counters(fs_info, qg, cur_old_count, |
| 1944 | cur_new_count); |
Mark Fasheh | 0f5dcf8 | 2016-03-29 17:19:55 -0700 | [diff] [blame] | 1945 | |
Qu Wenruo | 823ae5b | 2015-04-12 16:59:57 +0800 | [diff] [blame] | 1946 | /* Rfer update part */ |
| 1947 | if (cur_old_count == 0 && cur_new_count > 0) { |
| 1948 | qg->rfer += num_bytes; |
| 1949 | qg->rfer_cmpr += num_bytes; |
| 1950 | dirty = true; |
| 1951 | } |
| 1952 | if (cur_old_count > 0 && cur_new_count == 0) { |
| 1953 | qg->rfer -= num_bytes; |
| 1954 | qg->rfer_cmpr -= num_bytes; |
| 1955 | dirty = true; |
| 1956 | } |
| 1957 | |
| 1958 | /* Excl update part */ |
| 1959 | /* Exclusive/none -> shared case */ |
| 1960 | if (cur_old_count == nr_old_roots && |
| 1961 | cur_new_count < nr_new_roots) { |
| 1962 | /* Exclusive -> shared */ |
| 1963 | if (cur_old_count != 0) { |
| 1964 | qg->excl -= num_bytes; |
| 1965 | qg->excl_cmpr -= num_bytes; |
| 1966 | dirty = true; |
| 1967 | } |
| 1968 | } |
| 1969 | |
| 1970 | /* Shared -> exclusive/none case */ |
| 1971 | if (cur_old_count < nr_old_roots && |
| 1972 | cur_new_count == nr_new_roots) { |
| 1973 | /* Shared->exclusive */ |
| 1974 | if (cur_new_count != 0) { |
| 1975 | qg->excl += num_bytes; |
| 1976 | qg->excl_cmpr += num_bytes; |
| 1977 | dirty = true; |
| 1978 | } |
| 1979 | } |
| 1980 | |
| 1981 | /* Exclusive/none -> exclusive/none case */ |
| 1982 | if (cur_old_count == nr_old_roots && |
| 1983 | cur_new_count == nr_new_roots) { |
| 1984 | if (cur_old_count == 0) { |
| 1985 | /* None -> exclusive/none */ |
| 1986 | |
| 1987 | if (cur_new_count != 0) { |
| 1988 | /* None -> exclusive */ |
| 1989 | qg->excl += num_bytes; |
| 1990 | qg->excl_cmpr += num_bytes; |
| 1991 | dirty = true; |
| 1992 | } |
| 1993 | /* None -> none, nothing changed */ |
| 1994 | } else { |
| 1995 | /* Exclusive -> exclusive/none */ |
| 1996 | |
| 1997 | if (cur_new_count == 0) { |
| 1998 | /* Exclusive -> none */ |
| 1999 | qg->excl -= num_bytes; |
| 2000 | qg->excl_cmpr -= num_bytes; |
| 2001 | dirty = true; |
| 2002 | } |
| 2003 | /* Exclusive -> exclusive, nothing changed */ |
| 2004 | } |
| 2005 | } |
Qu Wenruo | c05f942 | 2015-08-03 14:44:29 +0800 | [diff] [blame] | 2006 | |
Qu Wenruo | 823ae5b | 2015-04-12 16:59:57 +0800 | [diff] [blame] | 2007 | if (dirty) |
| 2008 | qgroup_dirty(fs_info, qg); |
| 2009 | } |
| 2010 | return 0; |
| 2011 | } |
| 2012 | |
Qu Wenruo | 5edfd9f | 2017-02-27 15:10:34 +0800 | [diff] [blame] | 2013 | /* |
| 2014 | * Check if the @roots potentially is a list of fs tree roots |
| 2015 | * |
| 2016 | * Return 0 for definitely not a fs/subvol tree roots ulist |
| 2017 | * Return 1 for possible fs/subvol tree roots in the list (an empty list |
| 2018 | * counts as possible as well) |
| 2019 | */ |
| 2020 | static int maybe_fs_roots(struct ulist *roots) |
| 2021 | { |
| 2022 | struct ulist_node *unode; |
| 2023 | struct ulist_iterator uiter; |
| 2024 | |
| 2025 | /* Empty one, still possible for fs roots */ |
| 2026 | if (!roots || roots->nnodes == 0) |
| 2027 | return 1; |
| 2028 | |
| 2029 | ULIST_ITER_INIT(&uiter); |
| 2030 | unode = ulist_next(roots, &uiter); |
| 2031 | if (!unode) |
| 2032 | return 1; |
| 2033 | |
| 2034 | /* |
| 2035 | * If it contains fs tree roots, then it must belong to fs/subvol |
| 2036 | * trees. |
| 2037 | * If it contains a non-fs tree, it won't be shared with fs/subvol trees. |
| 2038 | */ |
| 2039 | return is_fstree(unode->val); |
| 2040 | } |
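| | /* |
| | * Illustrative note (not in the original source): checking only the first |
| | * node is enough because, per the comment above, fs/subvol tree roots are |
| | * never mixed with other tree roots in one ulist. A list starting with, |
| | * say, BTRFS_FS_TREE_OBJECTID is treated as possible fs roots, while one |
| | * starting with BTRFS_EXTENT_TREE_OBJECTID is rejected, letting qgroup |
| | * accounting skip that extent entirely. |
| | */ |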
| 2041 | |
Qu Wenruo | 442244c | 2015-04-16 17:18:36 +0800 | [diff] [blame] | 2042 | int |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2043 | btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, |
| 2044 | struct btrfs_fs_info *fs_info, |
| 2045 | u64 bytenr, u64 num_bytes, |
| 2046 | struct ulist *old_roots, struct ulist *new_roots) |
| 2047 | { |
| 2048 | struct ulist *qgroups = NULL; |
| 2049 | struct ulist *tmp = NULL; |
| 2050 | u64 seq; |
| 2051 | u64 nr_new_roots = 0; |
| 2052 | u64 nr_old_roots = 0; |
| 2053 | int ret = 0; |
| 2054 | |
David Sterba | 81353d5 | 2017-02-13 14:05:24 +0100 | [diff] [blame] | 2055 | if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) |
| 2056 | return 0; |
| 2057 | |
Qu Wenruo | 5edfd9f | 2017-02-27 15:10:34 +0800 | [diff] [blame] | 2058 | if (new_roots) { |
| 2059 | if (!maybe_fs_roots(new_roots)) |
| 2060 | goto out_free; |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2061 | nr_new_roots = new_roots->nnodes; |
Qu Wenruo | 5edfd9f | 2017-02-27 15:10:34 +0800 | [diff] [blame] | 2062 | } |
| 2063 | if (old_roots) { |
| 2064 | if (!maybe_fs_roots(old_roots)) |
| 2065 | goto out_free; |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2066 | nr_old_roots = old_roots->nnodes; |
Qu Wenruo | 5edfd9f | 2017-02-27 15:10:34 +0800 | [diff] [blame] | 2067 | } |
| 2068 | |
| 2069 | /* Quick exit, either not fs tree roots, or won't affect any qgroup */ |
| 2070 | if (nr_old_roots == 0 && nr_new_roots == 0) |
| 2071 | goto out_free; |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2072 | |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2073 | BUG_ON(!fs_info->quota_root); |
| 2074 | |
Qu Wenruo | c9f6f3c | 2018-05-03 09:59:02 +0800 | [diff] [blame] | 2075 | trace_btrfs_qgroup_account_extent(fs_info, trans->transid, bytenr, |
| 2076 | num_bytes, nr_old_roots, nr_new_roots); |
Mark Fasheh | 0f5dcf8 | 2016-03-29 17:19:55 -0700 | [diff] [blame] | 2077 | |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2078 | qgroups = ulist_alloc(GFP_NOFS); |
| 2079 | if (!qgroups) { |
| 2080 | ret = -ENOMEM; |
| 2081 | goto out_free; |
| 2082 | } |
| 2083 | tmp = ulist_alloc(GFP_NOFS); |
| 2084 | if (!tmp) { |
| 2085 | ret = -ENOMEM; |
| 2086 | goto out_free; |
| 2087 | } |
| 2088 | |
| 2089 | mutex_lock(&fs_info->qgroup_rescan_lock); |
| 2090 | if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { |
| 2091 | if (fs_info->qgroup_rescan_progress.objectid <= bytenr) { |
| 2092 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
| 2093 | ret = 0; |
| 2094 | goto out_free; |
| 2095 | } |
| 2096 | } |
| 2097 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
| 2098 | |
| 2099 | spin_lock(&fs_info->qgroup_lock); |
| 2100 | seq = fs_info->qgroup_seq; |
| 2101 | |
| 2102 | /* Update old refcnts using old_roots */ |
| 2103 | ret = qgroup_update_refcnt(fs_info, old_roots, tmp, qgroups, seq, |
| 2104 | UPDATE_OLD); |
| 2105 | if (ret < 0) |
| 2106 | goto out; |
| 2107 | |
| 2108 | /* Update new refcnts using new_roots */ |
| 2109 | ret = qgroup_update_refcnt(fs_info, new_roots, tmp, qgroups, seq, |
| 2110 | UPDATE_NEW); |
| 2111 | if (ret < 0) |
| 2112 | goto out; |
| 2113 | |
| 2114 | qgroup_update_counters(fs_info, qgroups, nr_old_roots, nr_new_roots, |
| 2115 | num_bytes, seq); |
| 2116 | |
| 2117 | /* |
| 2118 | * Bump qgroup_seq to avoid seq overlap |
| 2119 | */ |
| 2120 | fs_info->qgroup_seq += max(nr_old_roots, nr_new_roots) + 1; |
| 2121 | out: |
| 2122 | spin_unlock(&fs_info->qgroup_lock); |
| 2123 | out_free: |
| 2124 | ulist_free(tmp); |
| 2125 | ulist_free(qgroups); |
| 2126 | ulist_free(old_roots); |
| 2127 | ulist_free(new_roots); |
| 2128 | return ret; |
| 2129 | } |
| 2130 | |
Nikolay Borisov | 460fb20 | 2018-03-15 16:00:25 +0200 | [diff] [blame] | 2131 | int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans) |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2132 | { |
Nikolay Borisov | 460fb20 | 2018-03-15 16:00:25 +0200 | [diff] [blame] | 2133 | struct btrfs_fs_info *fs_info = trans->fs_info; |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2134 | struct btrfs_qgroup_extent_record *record; |
| 2135 | struct btrfs_delayed_ref_root *delayed_refs; |
| 2136 | struct ulist *new_roots = NULL; |
| 2137 | struct rb_node *node; |
Qu Wenruo | 9086db8 | 2015-04-20 09:53:50 +0800 | [diff] [blame] | 2138 | u64 qgroup_to_skip; |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2139 | int ret = 0; |
| 2140 | |
| 2141 | delayed_refs = &trans->transaction->delayed_refs; |
Qu Wenruo | 9086db8 | 2015-04-20 09:53:50 +0800 | [diff] [blame] | 2142 | qgroup_to_skip = delayed_refs->qgroup_to_skip; |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2143 | while ((node = rb_first(&delayed_refs->dirty_extent_root))) { |
| 2144 | record = rb_entry(node, struct btrfs_qgroup_extent_record, |
| 2145 | node); |
| 2146 | |
Jeff Mahoney | bc07452 | 2016-06-09 17:27:55 -0400 | [diff] [blame] | 2147 | trace_btrfs_qgroup_account_extents(fs_info, record); |
Mark Fasheh | 0f5dcf8 | 2016-03-29 17:19:55 -0700 | [diff] [blame] | 2148 | |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2149 | if (!ret) { |
| 2150 | /* |
Qu Wenruo | d1b8b94 | 2017-02-27 15:10:35 +0800 | [diff] [blame] | 2151 | * Old roots should already have been searched when the |
| 2152 | * qgroup extent record was inserted |
| 2153 | */ |
| 2154 | if (WARN_ON(!record->old_roots)) { |
| 2155 | /* Search commit root to find old_roots */ |
| 2156 | ret = btrfs_find_all_roots(NULL, fs_info, |
| 2157 | record->bytenr, 0, |
Zygo Blaxell | c995ab3 | 2017-09-22 13:58:45 -0400 | [diff] [blame] | 2158 | &record->old_roots, false); |
Qu Wenruo | d1b8b94 | 2017-02-27 15:10:35 +0800 | [diff] [blame] | 2159 | if (ret < 0) |
| 2160 | goto cleanup; |
| 2161 | } |
| 2162 | |
| 2163 | /* |
Edmund Nadolski | de47c9d | 2017-03-16 10:04:34 -0600 | [diff] [blame] | 2164 | * Use SEQ_LAST as time_seq to do a special search which |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2165 | * doesn't lock the tree or delayed_refs and searches the |
| 2166 | * current root. It's safe inside commit_transaction(). |
| 2167 | */ |
| 2168 | ret = btrfs_find_all_roots(trans, fs_info, |
Zygo Blaxell | c995ab3 | 2017-09-22 13:58:45 -0400 | [diff] [blame] | 2169 | record->bytenr, SEQ_LAST, &new_roots, false); |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2170 | if (ret < 0) |
| 2171 | goto cleanup; |
Qu Wenruo | d1b8b94 | 2017-02-27 15:10:35 +0800 | [diff] [blame] | 2172 | if (qgroup_to_skip) { |
Qu Wenruo | 9086db8 | 2015-04-20 09:53:50 +0800 | [diff] [blame] | 2173 | ulist_del(new_roots, qgroup_to_skip, 0); |
Qu Wenruo | d1b8b94 | 2017-02-27 15:10:35 +0800 | [diff] [blame] | 2174 | ulist_del(record->old_roots, qgroup_to_skip, |
| 2175 | 0); |
| 2176 | } |
Qu Wenruo | 550d7a2 | 2015-04-16 15:37:33 +0800 | [diff] [blame] | 2177 | ret = btrfs_qgroup_account_extent(trans, fs_info, |
| 2178 | record->bytenr, record->num_bytes, |
| 2179 | record->old_roots, new_roots); |
| 2180 | record->old_roots = NULL; |
| 2181 | new_roots = NULL; |
| 2182 | } |
| 2183 | cleanup: |
| 2184 | ulist_free(record->old_roots); |
| 2185 | ulist_free(new_roots); |
| 2186 | new_roots = NULL; |
| 2187 | rb_erase(node, &delayed_refs->dirty_extent_root); |
| 2188 | kfree(record); |
| 2189 | |
| 2190 | } |
| 2191 | return ret; |
| 2192 | } |
| 2193 | |
Josef Bacik | fcebe45 | 2014-05-13 17:30:47 -0700 | [diff] [blame] | 2194 | /* |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2195 | * Called from commit_transaction(). Writes all changed qgroups to disk. |
| 2196 | */ |
| 2197 | int btrfs_run_qgroups(struct btrfs_trans_handle *trans, |
| 2198 | struct btrfs_fs_info *fs_info) |
| 2199 | { |
| 2200 | struct btrfs_root *quota_root = fs_info->quota_root; |
| 2201 | int ret = 0; |
| 2202 | |
| 2203 | if (!quota_root) |
Nikolay Borisov | 5d23515 | 2018-01-31 10:52:04 +0200 | [diff] [blame] | 2204 | return ret; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2205 | |
| 2206 | spin_lock(&fs_info->qgroup_lock); |
| 2207 | while (!list_empty(&fs_info->dirty_qgroups)) { |
| 2208 | struct btrfs_qgroup *qgroup; |
| 2209 | qgroup = list_first_entry(&fs_info->dirty_qgroups, |
| 2210 | struct btrfs_qgroup, dirty); |
| 2211 | list_del_init(&qgroup->dirty); |
| 2212 | spin_unlock(&fs_info->qgroup_lock); |
| 2213 | ret = update_qgroup_info_item(trans, quota_root, qgroup); |
| 2214 | if (ret) |
| 2215 | fs_info->qgroup_flags |= |
| 2216 | BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; |
Dongsheng Yang | d3001ed | 2014-11-20 21:04:56 -0500 | [diff] [blame] | 2217 | ret = update_qgroup_limit_item(trans, quota_root, qgroup); |
| 2218 | if (ret) |
| 2219 | fs_info->qgroup_flags |= |
| 2220 | BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2221 | spin_lock(&fs_info->qgroup_lock); |
| 2222 | } |
Josef Bacik | afcdd12 | 2016-09-02 15:40:02 -0400 | [diff] [blame] | 2223 | if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2224 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_ON; |
| 2225 | else |
| 2226 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_ON; |
| 2227 | spin_unlock(&fs_info->qgroup_lock); |
| 2228 | |
| 2229 | ret = update_qgroup_status_item(trans, fs_info, quota_root); |
| 2230 | if (ret) |
| 2231 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; |
| 2232 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2233 | return ret; |
| 2234 | } |
| 2235 | |
| 2236 | /* |
Nicholas D Steeves | 0132761 | 2016-05-19 21:18:45 -0400 | [diff] [blame] | 2237 | * Copy the accounting information between qgroups. This is necessary |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2238 | * when a snapshot or a subvolume is created. Throwing an error will |
| 2239 | * cause a transaction abort so we take extra care here to only error |
| 2240 | * when a readonly fs is a reasonable outcome. |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2241 | */ |
| 2242 | int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, |
| 2243 | struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid, |
| 2244 | struct btrfs_qgroup_inherit *inherit) |
| 2245 | { |
| 2246 | int ret = 0; |
| 2247 | int i; |
| 2248 | u64 *i_qgroups; |
| 2249 | struct btrfs_root *quota_root = fs_info->quota_root; |
| 2250 | struct btrfs_qgroup *srcgroup; |
| 2251 | struct btrfs_qgroup *dstgroup; |
| 2252 | u32 level_size = 0; |
Wang Shilong | 3f5e2d3 | 2013-04-07 10:50:19 +0000 | [diff] [blame] | 2253 | u64 nums; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2254 | |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 2255 | mutex_lock(&fs_info->qgroup_ioctl_lock); |
Josef Bacik | afcdd12 | 2016-09-02 15:40:02 -0400 | [diff] [blame] | 2256 | if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 2257 | goto out; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2258 | |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 2259 | if (!quota_root) { |
| 2260 | ret = -EINVAL; |
| 2261 | goto out; |
| 2262 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2263 | |
Wang Shilong | 3f5e2d3 | 2013-04-07 10:50:19 +0000 | [diff] [blame] | 2264 | if (inherit) { |
| 2265 | i_qgroups = (u64 *)(inherit + 1); |
| 2266 | nums = inherit->num_qgroups + 2 * inherit->num_ref_copies + |
| 2267 | 2 * inherit->num_excl_copies; |
| 2268 | for (i = 0; i < nums; ++i) { |
| 2269 | srcgroup = find_qgroup_rb(fs_info, *i_qgroups); |
Dongsheng Yang | 09870d2 | 2014-11-11 07:18:22 -0500 | [diff] [blame] | 2270 | |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2271 | /* |
| 2272 | * Zero out invalid groups so we can ignore |
| 2273 | * them later. |
| 2274 | */ |
| 2275 | if (!srcgroup || |
| 2276 | ((srcgroup->qgroupid >> 48) <= (objectid >> 48))) |
| 2277 | *i_qgroups = 0ULL; |
| 2278 | |
Wang Shilong | 3f5e2d3 | 2013-04-07 10:50:19 +0000 | [diff] [blame] | 2279 | ++i_qgroups; |
| 2280 | } |
| 2281 | } |
| 2282 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2283 | /* |
| 2284 | * create a tracking group for the subvol itself |
| 2285 | */ |
| 2286 | ret = add_qgroup_item(trans, quota_root, objectid); |
| 2287 | if (ret) |
| 2288 | goto out; |
| 2289 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2290 | /* |
| 2291 | * add qgroup to all inherited groups |
| 2292 | */ |
| 2293 | if (inherit) { |
| 2294 | i_qgroups = (u64 *)(inherit + 1); |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2295 | for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) { |
| 2296 | if (*i_qgroups == 0) |
| 2297 | continue; |
Lu Fengqi | 711169c | 2018-07-18 14:45:24 +0800 | [diff] [blame^] | 2298 | ret = add_qgroup_relation_item(trans, objectid, |
| 2299 | *i_qgroups); |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2300 | if (ret && ret != -EEXIST) |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2301 | goto out; |
Lu Fengqi | 711169c | 2018-07-18 14:45:24 +0800 | [diff] [blame^] | 2302 | ret = add_qgroup_relation_item(trans, *i_qgroups, |
| 2303 | objectid); |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2304 | if (ret && ret != -EEXIST) |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2305 | goto out; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2306 | } |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2307 | ret = 0; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2308 | } |
| 2309 | |
| 2310 | |
| 2311 | spin_lock(&fs_info->qgroup_lock); |
| 2312 | |
| 2313 | dstgroup = add_qgroup_rb(fs_info, objectid); |
Dan Carpenter | 57a5a88 | 2012-07-30 02:15:43 -0600 | [diff] [blame] | 2314 | if (IS_ERR(dstgroup)) { |
| 2315 | ret = PTR_ERR(dstgroup); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2316 | goto unlock; |
Dan Carpenter | 57a5a88 | 2012-07-30 02:15:43 -0600 | [diff] [blame] | 2317 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2318 | |
Dongsheng Yang | e8c8541 | 2014-11-20 20:58:34 -0500 | [diff] [blame] | 2319 | if (inherit && inherit->flags & BTRFS_QGROUP_INHERIT_SET_LIMITS) { |
Dongsheng Yang | e8c8541 | 2014-11-20 20:58:34 -0500 | [diff] [blame] | 2320 | dstgroup->lim_flags = inherit->lim.flags; |
| 2321 | dstgroup->max_rfer = inherit->lim.max_rfer; |
| 2322 | dstgroup->max_excl = inherit->lim.max_excl; |
| 2323 | dstgroup->rsv_rfer = inherit->lim.rsv_rfer; |
| 2324 | dstgroup->rsv_excl = inherit->lim.rsv_excl; |
Dongsheng Yang | 1510e71 | 2014-11-20 21:01:41 -0500 | [diff] [blame] | 2325 | |
| 2326 | ret = update_qgroup_limit_item(trans, quota_root, dstgroup); |
| 2327 | if (ret) { |
| 2328 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; |
Jeff Mahoney | 5d163e0 | 2016-09-20 10:05:00 -0400 | [diff] [blame] | 2329 | btrfs_info(fs_info, |
| 2330 | "unable to update quota limit for %llu", |
| 2331 | dstgroup->qgroupid); |
Dongsheng Yang | 1510e71 | 2014-11-20 21:01:41 -0500 | [diff] [blame] | 2332 | goto unlock; |
| 2333 | } |
Dongsheng Yang | e8c8541 | 2014-11-20 20:58:34 -0500 | [diff] [blame] | 2334 | } |
| 2335 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2336 | if (srcid) { |
| 2337 | srcgroup = find_qgroup_rb(fs_info, srcid); |
Chris Mason | f3a87f1 | 2012-09-14 20:06:30 -0400 | [diff] [blame] | 2338 | if (!srcgroup) |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2339 | goto unlock; |
Josef Bacik | fcebe45 | 2014-05-13 17:30:47 -0700 | [diff] [blame] | 2340 | |
| 2341 | /* |
| 2342 | * We call inherit after we clone the root in order to make sure |
| 2343 | * our counts don't go crazy, so at this point the only |
| 2344 | * difference between the two roots should be the root node. |
| 2345 | */ |
Lu Fengqi | c8389d4 | 2018-07-17 16:58:22 +0800 | [diff] [blame] | 2346 | level_size = fs_info->nodesize; |
Josef Bacik | fcebe45 | 2014-05-13 17:30:47 -0700 | [diff] [blame] | 2347 | dstgroup->rfer = srcgroup->rfer; |
| 2348 | dstgroup->rfer_cmpr = srcgroup->rfer_cmpr; |
| 2349 | dstgroup->excl = level_size; |
| 2350 | dstgroup->excl_cmpr = level_size; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2351 | srcgroup->excl = level_size; |
| 2352 | srcgroup->excl_cmpr = level_size; |
Dongsheng Yang | 3eeb4d5 | 2014-11-20 20:14:38 -0500 | [diff] [blame] | 2353 | |
| 2354 | /* inherit the limit info */ |
| 2355 | dstgroup->lim_flags = srcgroup->lim_flags; |
| 2356 | dstgroup->max_rfer = srcgroup->max_rfer; |
| 2357 | dstgroup->max_excl = srcgroup->max_excl; |
| 2358 | dstgroup->rsv_rfer = srcgroup->rsv_rfer; |
| 2359 | dstgroup->rsv_excl = srcgroup->rsv_excl; |
| 2360 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2361 | qgroup_dirty(fs_info, dstgroup); |
| 2362 | qgroup_dirty(fs_info, srcgroup); |
| 2363 | } |
| 2364 | |
Chris Mason | f3a87f1 | 2012-09-14 20:06:30 -0400 | [diff] [blame] | 2365 | if (!inherit) |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2366 | goto unlock; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2367 | |
| 2368 | i_qgroups = (u64 *)(inherit + 1); |
| 2369 | for (i = 0; i < inherit->num_qgroups; ++i) { |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2370 | if (*i_qgroups) { |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 2371 | ret = add_relation_rb(fs_info, objectid, *i_qgroups); |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2372 | if (ret) |
| 2373 | goto unlock; |
| 2374 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2375 | ++i_qgroups; |
| 2376 | } |
| 2377 | |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2378 | for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2379 | struct btrfs_qgroup *src; |
| 2380 | struct btrfs_qgroup *dst; |
| 2381 | |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2382 | if (!i_qgroups[0] || !i_qgroups[1]) |
| 2383 | continue; |
| 2384 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2385 | src = find_qgroup_rb(fs_info, i_qgroups[0]); |
| 2386 | dst = find_qgroup_rb(fs_info, i_qgroups[1]); |
| 2387 | |
| 2388 | if (!src || !dst) { |
| 2389 | ret = -EINVAL; |
| 2390 | goto unlock; |
| 2391 | } |
| 2392 | |
| 2393 | dst->rfer = src->rfer - level_size; |
| 2394 | dst->rfer_cmpr = src->rfer_cmpr - level_size; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2395 | } |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2396 | for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2397 | struct btrfs_qgroup *src; |
| 2398 | struct btrfs_qgroup *dst; |
| 2399 | |
Mark Fasheh | 918c2ee | 2016-03-30 17:57:48 -0700 | [diff] [blame] | 2400 | if (!i_qgroups[0] || !i_qgroups[1]) |
| 2401 | continue; |
| 2402 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2403 | src = find_qgroup_rb(fs_info, i_qgroups[0]); |
| 2404 | dst = find_qgroup_rb(fs_info, i_qgroups[1]); |
| 2405 | |
| 2406 | if (!src || !dst) { |
| 2407 | ret = -EINVAL; |
| 2408 | goto unlock; |
| 2409 | } |
| 2410 | |
| 2411 | dst->excl = src->excl + level_size; |
| 2412 | dst->excl_cmpr = src->excl_cmpr + level_size; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2413 | } |
| 2414 | |
| 2415 | unlock: |
| 2416 | spin_unlock(&fs_info->qgroup_lock); |
| 2417 | out: |
Wang Shilong | f2f6ed3 | 2013-04-07 10:50:16 +0000 | [diff] [blame] | 2418 | mutex_unlock(&fs_info->qgroup_ioctl_lock); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2419 | return ret; |
| 2420 | } |
| 2421 | |
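/*
 * Illustrative sketch: a worked example of the snapshot accounting done in
 * btrfs_qgroup_inherit() above, assuming a hypothetical 16K nodesize.  The
 * helper below is not used anywhere; it only restates the assignments.
 * After the snapshot both subvolumes reference the same data, and each
 * exclusively owns only its own root node.
 */
static void qgroup_inherit_example_sketch(struct btrfs_qgroup *src,
					  struct btrfs_qgroup *dst,
					  u64 level_size)
{
	/* e.g. src->rfer == src->excl == SZ_1M before the snapshot */
	dst->rfer = src->rfer;		/* dst->rfer == SZ_1M  */
	dst->excl = level_size;		/* dst->excl == SZ_16K */
	src->excl = level_size;		/* src->excl == SZ_16K */
}
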
Qu Wenruo | a514d63 | 2017-12-22 16:06:39 +0800 | [diff] [blame] | 2422 | /* |
| 2423 | * Two limits for committing a transaction in advance. |
| 2424 | * |
| 2425 | * For RATIO, the threshold is 1/RATIO of the remaining limit |
| 2426 | * (excluding data and prealloc meta reservations). |
| 2427 | * For SIZE, the threshold is a fixed number of bytes. |
| 2428 | */ |
| 2429 | #define QGROUP_PERTRANS_RATIO 32 |
| 2430 | #define QGROUP_PERTRANS_SIZE SZ_32M |
| 2431 | static bool qgroup_check_limits(struct btrfs_fs_info *fs_info, |
| 2432 | const struct btrfs_qgroup *qg, u64 num_bytes) |
Jeff Mahoney | 003d7c5 | 2017-01-25 09:50:33 -0500 | [diff] [blame] | 2433 | { |
Qu Wenruo | a514d63 | 2017-12-22 16:06:39 +0800 | [diff] [blame] | 2434 | u64 limit; |
| 2435 | u64 threshold; |
| 2436 | |
Jeff Mahoney | 003d7c5 | 2017-01-25 09:50:33 -0500 | [diff] [blame] | 2437 | if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && |
Qu Wenruo | dba2132 | 2017-12-12 15:34:25 +0800 | [diff] [blame] | 2438 | qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) |
Jeff Mahoney | 003d7c5 | 2017-01-25 09:50:33 -0500 | [diff] [blame] | 2439 | return false; |
| 2440 | |
| 2441 | if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) && |
Qu Wenruo | dba2132 | 2017-12-12 15:34:25 +0800 | [diff] [blame] | 2442 | qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) |
Jeff Mahoney | 003d7c5 | 2017-01-25 09:50:33 -0500 | [diff] [blame] | 2443 | return false; |
| 2444 | |
Qu Wenruo | a514d63 | 2017-12-22 16:06:39 +0800 | [diff] [blame] | 2445 | /* |
| 2446 | * Even if we passed the check, it's better to check whether the |
| 2447 | * meta_pertrans reservation is pushing us near the limit. |
| 2448 | * If there is too much pertrans reservation or it's near the limit, |
| 2449 | * try to commit the transaction to free some of it, using transaction_kthread. |
| 2450 | */ |
| 2451 | if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER | |
| 2452 | BTRFS_QGROUP_LIMIT_MAX_EXCL))) { |
| 2453 | if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) |
| 2454 | limit = qg->max_excl; |
| 2455 | else |
| 2456 | limit = qg->max_rfer; |
| 2457 | threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] - |
| 2458 | qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) / |
| 2459 | QGROUP_PERTRANS_RATIO; |
| 2460 | threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE); |
| 2461 | |
| 2462 | /* |
| 2463 | * Use transaction_kthread to commit the transaction, so we no |
| 2464 | * longer need to worry about nested transactions or lock context. |
| 2465 | */ |
| 2466 | if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold) |
| 2467 | btrfs_commit_transaction_locksafe(fs_info); |
| 2468 | } |
| 2469 | |
Jeff Mahoney | 003d7c5 | 2017-01-25 09:50:33 -0500 | [diff] [blame] | 2470 | return true; |
| 2471 | } |
| 2472 | |
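/*
 * Illustrative sketch: the early commit heuristic in qgroup_check_limits()
 * boils down to the pure computation below (hypothetical helper, not used
 * by the code).  Worked example: limit = 1G, data rsv = 256M, prealloc
 * rsv = 128M gives (1G - 256M - 128M) / 32 = 20M, capped at SZ_32M, so a
 * pertrans reservation above 20M would trigger a background commit.
 */
static inline u64 qgroup_pertrans_threshold_sketch(u64 limit, u64 data_rsv,
						   u64 prealloc_rsv)
{
	u64 threshold = (limit - data_rsv - prealloc_rsv) /
			QGROUP_PERTRANS_RATIO;

	return min_t(u64, threshold, QGROUP_PERTRANS_SIZE);
}
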
Qu Wenruo | dba2132 | 2017-12-12 15:34:25 +0800 | [diff] [blame] | 2473 | static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, |
| 2474 | enum btrfs_qgroup_rsv_type type) |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2475 | { |
| 2476 | struct btrfs_root *quota_root; |
| 2477 | struct btrfs_qgroup *qgroup; |
| 2478 | struct btrfs_fs_info *fs_info = root->fs_info; |
| 2479 | u64 ref_root = root->root_key.objectid; |
| 2480 | int ret = 0; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2481 | struct ulist_node *unode; |
| 2482 | struct ulist_iterator uiter; |
| 2483 | |
| 2484 | if (!is_fstree(ref_root)) |
| 2485 | return 0; |
| 2486 | |
| 2487 | if (num_bytes == 0) |
| 2488 | return 0; |
Sargun Dhillon | f29efe2 | 2017-05-11 21:17:33 +0000 | [diff] [blame] | 2489 | |
| 2490 | if (test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags) && |
| 2491 | capable(CAP_SYS_RESOURCE)) |
| 2492 | enforce = false; |
| 2493 | |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2494 | spin_lock(&fs_info->qgroup_lock); |
| 2495 | quota_root = fs_info->quota_root; |
| 2496 | if (!quota_root) |
| 2497 | goto out; |
| 2498 | |
| 2499 | qgroup = find_qgroup_rb(fs_info, ref_root); |
| 2500 | if (!qgroup) |
| 2501 | goto out; |
| 2502 | |
| 2503 | /* |
| 2504 | * In a first step, we check all affected qgroups to see whether any |
| 2505 | * limit would be exceeded. |
| 2506 | */ |
Wang Shilong | 1e8f915 | 2013-05-06 11:03:27 +0000 | [diff] [blame] | 2507 | ulist_reinit(fs_info->qgroup_ulist); |
| 2508 | ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, |
David Sterba | a1840b5 | 2018-03-27 19:04:50 +0200 | [diff] [blame] | 2509 | qgroup_to_aux(qgroup), GFP_ATOMIC); |
Wang Shilong | 3c97185 | 2013-04-17 14:00:36 +0000 | [diff] [blame] | 2510 | if (ret < 0) |
| 2511 | goto out; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2512 | ULIST_ITER_INIT(&uiter); |
Wang Shilong | 1e8f915 | 2013-05-06 11:03:27 +0000 | [diff] [blame] | 2513 | while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2514 | struct btrfs_qgroup *qg; |
| 2515 | struct btrfs_qgroup_list *glist; |
| 2516 | |
David Sterba | ef2fff6 | 2016-10-26 16:23:50 +0200 | [diff] [blame] | 2517 | qg = unode_aux_to_qgroup(unode); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2518 | |
Qu Wenruo | a514d63 | 2017-12-22 16:06:39 +0800 | [diff] [blame] | 2519 | if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) { |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2520 | ret = -EDQUOT; |
Wang Shilong | 720f1e2 | 2013-03-06 11:51:47 +0000 | [diff] [blame] | 2521 | goto out; |
| 2522 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2523 | |
| 2524 | list_for_each_entry(glist, &qg->groups, next_group) { |
Wang Shilong | 1e8f915 | 2013-05-06 11:03:27 +0000 | [diff] [blame] | 2525 | ret = ulist_add(fs_info->qgroup_ulist, |
| 2526 | glist->group->qgroupid, |
David Sterba | a1840b5 | 2018-03-27 19:04:50 +0200 | [diff] [blame] | 2527 | qgroup_to_aux(glist->group), GFP_ATOMIC); |
Wang Shilong | 3c97185 | 2013-04-17 14:00:36 +0000 | [diff] [blame] | 2528 | if (ret < 0) |
| 2529 | goto out; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2530 | } |
| 2531 | } |
Wang Shilong | 3c97185 | 2013-04-17 14:00:36 +0000 | [diff] [blame] | 2532 | ret = 0; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2533 | /* |
| 2534 | * no limits exceeded, now record the reservation into all qgroups |
| 2535 | */ |
| 2536 | ULIST_ITER_INIT(&uiter); |
Wang Shilong | 1e8f915 | 2013-05-06 11:03:27 +0000 | [diff] [blame] | 2537 | while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2538 | struct btrfs_qgroup *qg; |
| 2539 | |
David Sterba | ef2fff6 | 2016-10-26 16:23:50 +0200 | [diff] [blame] | 2540 | qg = unode_aux_to_qgroup(unode); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2541 | |
Qu Wenruo | 64ee4e7 | 2017-12-12 15:34:27 +0800 | [diff] [blame] | 2542 | qgroup_rsv_add(fs_info, qg, num_bytes, type); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2544 | } |
| 2545 | |
| 2546 | out: |
| 2547 | spin_unlock(&fs_info->qgroup_lock); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2548 | return ret; |
| 2549 | } |
| 2550 | |
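/*
 * Illustrative sketch: qgroup_reserve() above is a two-phase walk over the
 * qgroup and all of its ancestors.  Only after every node passes the limit
 * check is the reservation recorded, so a failing reserve never leaves
 * partially updated counters.  Simplified model over a hypothetical flat
 * array standing in for the ulist walk (helper not used by the code):
 */
static int qgroup_reserve_two_phase_sketch(struct btrfs_fs_info *fs_info,
					   struct btrfs_qgroup **nodes, int nr,
					   u64 num_bytes,
					   enum btrfs_qgroup_rsv_type type)
{
	int i;

	for (i = 0; i < nr; i++)	/* phase 1: check limits only */
		if (!qgroup_check_limits(fs_info, nodes[i], num_bytes))
			return -EDQUOT;
	for (i = 0; i < nr; i++)	/* phase 2: record the reservation */
		qgroup_rsv_add(fs_info, nodes[i], num_bytes, type);
	return 0;
}
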
Qu Wenruo | e1211d0 | 2017-12-12 15:34:30 +0800 | [diff] [blame] | 2551 | /* |
| 2552 | * Free @num_bytes of reserved space with @type for qgroup. (Normally level 0 |
| 2553 | * qgroup). |
| 2554 | * |
| 2555 | * Will also handle all higher level qgroups. |
| 2556 | * |
| 2557 | * NOTE: If @num_bytes is (u64)-1, this means to free all bytes of this qgroup. |
| 2558 | * This special case is only used for META_PERTRANS type. |
| 2559 | */ |
Qu Wenruo | 297d750 | 2015-09-08 17:08:37 +0800 | [diff] [blame] | 2560 | void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info, |
Qu Wenruo | d4e5c92 | 2017-12-12 15:34:23 +0800 | [diff] [blame] | 2561 | u64 ref_root, u64 num_bytes, |
| 2562 | enum btrfs_qgroup_rsv_type type) |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2563 | { |
| 2564 | struct btrfs_root *quota_root; |
| 2565 | struct btrfs_qgroup *qgroup; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2566 | struct ulist_node *unode; |
| 2567 | struct ulist_iterator uiter; |
Wang Shilong | 3c97185 | 2013-04-17 14:00:36 +0000 | [diff] [blame] | 2568 | int ret = 0; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2569 | |
| 2570 | if (!is_fstree(ref_root)) |
| 2571 | return; |
| 2572 | |
| 2573 | if (num_bytes == 0) |
| 2574 | return; |
| 2575 | |
Qu Wenruo | e1211d0 | 2017-12-12 15:34:30 +0800 | [diff] [blame] | 2576 | if (num_bytes == (u64)-1 && type != BTRFS_QGROUP_RSV_META_PERTRANS) { |
| 2577 | WARN(1, "%s: Invalid type to free", __func__); |
| 2578 | return; |
| 2579 | } |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2580 | spin_lock(&fs_info->qgroup_lock); |
| 2581 | |
| 2582 | quota_root = fs_info->quota_root; |
| 2583 | if (!quota_root) |
| 2584 | goto out; |
| 2585 | |
| 2586 | qgroup = find_qgroup_rb(fs_info, ref_root); |
| 2587 | if (!qgroup) |
| 2588 | goto out; |
| 2589 | |
Qu Wenruo | e1211d0 | 2017-12-12 15:34:30 +0800 | [diff] [blame] | 2590 | if (num_bytes == (u64)-1) |
Qu Wenruo | 8287475 | 2017-12-12 15:34:34 +0800 | [diff] [blame] | 2591 | /* |
| 2592 | * We're freeing all pertrans rsv, get reserved value from |
| 2593 | * level 0 qgroup as real num_bytes to free. |
| 2594 | */ |
Qu Wenruo | e1211d0 | 2017-12-12 15:34:30 +0800 | [diff] [blame] | 2595 | num_bytes = qgroup->rsv.values[type]; |
| 2596 | |
Wang Shilong | 1e8f915 | 2013-05-06 11:03:27 +0000 | [diff] [blame] | 2597 | ulist_reinit(fs_info->qgroup_ulist); |
| 2598 | ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, |
David Sterba | a1840b5 | 2018-03-27 19:04:50 +0200 | [diff] [blame] | 2599 | qgroup_to_aux(qgroup), GFP_ATOMIC); |
Wang Shilong | 3c97185 | 2013-04-17 14:00:36 +0000 | [diff] [blame] | 2600 | if (ret < 0) |
| 2601 | goto out; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2602 | ULIST_ITER_INIT(&uiter); |
Wang Shilong | 1e8f915 | 2013-05-06 11:03:27 +0000 | [diff] [blame] | 2603 | while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2604 | struct btrfs_qgroup *qg; |
| 2605 | struct btrfs_qgroup_list *glist; |
| 2606 | |
David Sterba | ef2fff6 | 2016-10-26 16:23:50 +0200 | [diff] [blame] | 2607 | qg = unode_aux_to_qgroup(unode); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2608 | |
Qu Wenruo | 64ee4e7 | 2017-12-12 15:34:27 +0800 | [diff] [blame] | 2609 | qgroup_rsv_release(fs_info, qg, num_bytes, type); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2611 | |
| 2612 | list_for_each_entry(glist, &qg->groups, next_group) { |
Wang Shilong | 1e8f915 | 2013-05-06 11:03:27 +0000 | [diff] [blame] | 2613 | ret = ulist_add(fs_info->qgroup_ulist, |
| 2614 | glist->group->qgroupid, |
David Sterba | a1840b5 | 2018-03-27 19:04:50 +0200 | [diff] [blame] | 2615 | qgroup_to_aux(glist->group), GFP_ATOMIC); |
Wang Shilong | 3c97185 | 2013-04-17 14:00:36 +0000 | [diff] [blame] | 2616 | if (ret < 0) |
| 2617 | goto out; |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2618 | } |
| 2619 | } |
| 2620 | |
| 2621 | out: |
| 2622 | spin_unlock(&fs_info->qgroup_lock); |
Arne Jansen | bed92ea | 2012-06-28 18:03:02 +0200 | [diff] [blame] | 2623 | } |
| 2624 | |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2625 | /* |
Qu Wenruo | ff3d27a0 | 2018-05-14 09:38:13 +0800 | [diff] [blame] | 2626 | * Check if the leaf is the last leaf, which means all node pointers |
| 2627 | * are at their last position. |
| 2628 | */ |
| 2629 | static bool is_last_leaf(struct btrfs_path *path) |
| 2630 | { |
| 2631 | int i; |
| 2632 | |
| 2633 | for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { |
| 2634 | if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1) |
| 2635 | return false; |
| 2636 | } |
| 2637 | return true; |
| 2638 | } |
| 2639 | |
| 2640 | /* |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2641 | * returns < 0 on error, 0 when more leaves are to be scanned. |
Qu Wenruo | 3393168 | 2015-02-27 16:24:24 +0800 | [diff] [blame] | 2642 | * returns 1 when done. |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2643 | */ |
| 2644 | static int |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2645 | qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, |
Qu Wenruo | 0a0e8b89 | 2015-10-26 09:19:43 +0800 | [diff] [blame] | 2646 | struct btrfs_trans_handle *trans) |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2647 | { |
| 2648 | struct btrfs_key found; |
Qu Wenruo | 0a0e8b89 | 2015-10-26 09:19:43 +0800 | [diff] [blame] | 2649 | struct extent_buffer *scratch_leaf = NULL; |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2650 | struct ulist *roots = NULL; |
Josef Bacik | fcebe45 | 2014-05-13 17:30:47 -0700 | [diff] [blame] | 2651 | u64 num_bytes; |
Qu Wenruo | ff3d27a0 | 2018-05-14 09:38:13 +0800 | [diff] [blame] | 2652 | bool done; |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2653 | int slot; |
| 2654 | int ret; |
| 2655 | |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2656 | mutex_lock(&fs_info->qgroup_rescan_lock); |
| 2657 | ret = btrfs_search_slot_for_read(fs_info->extent_root, |
| 2658 | &fs_info->qgroup_rescan_progress, |
| 2659 | path, 1, 0); |
| 2660 | |
Jeff Mahoney | ab8d0fc | 2016-09-20 10:05:02 -0400 | [diff] [blame] | 2661 | btrfs_debug(fs_info, |
| 2662 | "current progress key (%llu %u %llu), search_slot ret %d", |
| 2663 | fs_info->qgroup_rescan_progress.objectid, |
| 2664 | fs_info->qgroup_rescan_progress.type, |
| 2665 | fs_info->qgroup_rescan_progress.offset, ret); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2666 | |
| 2667 | if (ret) { |
| 2668 | /* |
| 2669 | * The rescan is about to end, we will not be scanning any |
| 2670 | * further blocks. We cannot unset the RESCAN flag here, because |
| 2671 | * we want to commit the transaction if everything went well. |
| 2672 | * To make the live accounting work in this phase, we set our |
| 2673 | * scan progress pointer such that every real extent objectid |
| 2674 | * will be smaller. |
| 2675 | */ |
| 2676 | fs_info->qgroup_rescan_progress.objectid = (u64)-1; |
| 2677 | btrfs_release_path(path); |
| 2678 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
| 2679 | return ret; |
| 2680 | } |
Qu Wenruo | ff3d27a0 | 2018-05-14 09:38:13 +0800 | [diff] [blame] | 2681 | done = is_last_leaf(path); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2682 | |
| 2683 | btrfs_item_key_to_cpu(path->nodes[0], &found, |
| 2684 | btrfs_header_nritems(path->nodes[0]) - 1); |
| 2685 | fs_info->qgroup_rescan_progress.objectid = found.objectid + 1; |
| 2686 | |
Qu Wenruo | 0a0e8b89 | 2015-10-26 09:19:43 +0800 | [diff] [blame] | 2687 | scratch_leaf = btrfs_clone_extent_buffer(path->nodes[0]); |
| 2688 | if (!scratch_leaf) { |
| 2689 | ret = -ENOMEM; |
| 2690 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
| 2691 | goto out; |
| 2692 | } |
| 2693 | extent_buffer_get(scratch_leaf); |
| 2694 | btrfs_tree_read_lock(scratch_leaf); |
| 2695 | btrfs_set_lock_blocking_rw(scratch_leaf, BTRFS_READ_LOCK); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2696 | slot = path->slots[0]; |
| 2697 | btrfs_release_path(path); |
| 2698 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
| 2699 | |
| 2700 | for (; slot < btrfs_header_nritems(scratch_leaf); ++slot) { |
| 2701 | btrfs_item_key_to_cpu(scratch_leaf, &found, slot); |
Josef Bacik | 3a6d75e | 2014-01-23 16:45:10 -0500 | [diff] [blame] | 2702 | if (found.type != BTRFS_EXTENT_ITEM_KEY && |
| 2703 | found.type != BTRFS_METADATA_ITEM_KEY) |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2704 | continue; |
Josef Bacik | 3a6d75e | 2014-01-23 16:45:10 -0500 | [diff] [blame] | 2705 | if (found.type == BTRFS_METADATA_ITEM_KEY) |
Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 2706 | num_bytes = fs_info->nodesize; |
Josef Bacik | 3a6d75e | 2014-01-23 16:45:10 -0500 | [diff] [blame] | 2707 | else |
| 2708 | num_bytes = found.offset; |
| 2709 | |
Josef Bacik | fcebe45 | 2014-05-13 17:30:47 -0700 | [diff] [blame] | 2710 | ret = btrfs_find_all_roots(NULL, fs_info, found.objectid, 0, |
Zygo Blaxell | c995ab3 | 2017-09-22 13:58:45 -0400 | [diff] [blame] | 2711 | &roots, false); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2712 | if (ret < 0) |
| 2713 | goto out; |
Qu Wenruo | 9d220c9 | 2015-04-13 11:02:16 +0800 | [diff] [blame] | 2714 | /* For rescan, just pass old_roots as NULL */ |
| 2715 | ret = btrfs_qgroup_account_extent(trans, fs_info, |
| 2716 | found.objectid, num_bytes, NULL, roots); |
| 2717 | if (ret < 0) |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2718 | goto out; |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2719 | } |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2720 | out: |
Qu Wenruo | 0a0e8b89 | 2015-10-26 09:19:43 +0800 | [diff] [blame] | 2721 | if (scratch_leaf) { |
| 2722 | btrfs_tree_read_unlock_blocking(scratch_leaf); |
| 2723 | free_extent_buffer(scratch_leaf); |
| 2724 | } |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2725 | |
Qu Wenruo | 6f7de19 | 2018-06-27 18:19:55 +0800 | [diff] [blame] | 2726 | if (done && !ret) { |
Qu Wenruo | ff3d27a0 | 2018-05-14 09:38:13 +0800 | [diff] [blame] | 2727 | ret = 1; |
Qu Wenruo | 6f7de19 | 2018-06-27 18:19:55 +0800 | [diff] [blame] | 2728 | fs_info->qgroup_rescan_progress.objectid = (u64)-1; |
| 2729 | } |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2730 | return ret; |
| 2731 | } |
| 2732 | |
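/*
 * Illustrative sketch: a hypothetical minimal driver for the return
 * contract above (< 0 error, 0 more leaves, 1 done); the real worker
 * below additionally starts and commits a transaction around every leaf
 * (helper not used by the code):
 */
static int qgroup_rescan_drive_sketch(struct btrfs_fs_info *fs_info,
				      struct btrfs_path *path,
				      struct btrfs_trans_handle *trans)
{
	int ret;

	do {
		ret = qgroup_rescan_leaf(fs_info, path, trans);
	} while (ret == 0);

	return ret < 0 ? ret : 0;	/* 1 (done) maps to success */
}
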
Qu Wenruo | d458b05 | 2014-02-28 10:46:19 +0800 | [diff] [blame] | 2733 | static void btrfs_qgroup_rescan_worker(struct btrfs_work *work) |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2734 | { |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2735 | struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info, |
| 2736 | qgroup_rescan_work); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2737 | struct btrfs_path *path; |
| 2738 | struct btrfs_trans_handle *trans = NULL; |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2739 | int err = -ENOMEM; |
Qu Wenruo | 53b7cde | 2015-02-27 16:24:25 +0800 | [diff] [blame] | 2740 | int ret = 0; |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2741 | |
| 2742 | path = btrfs_alloc_path(); |
| 2743 | if (!path) |
| 2744 | goto out; |
Qu Wenruo | b6debf1 | 2018-05-14 09:38:12 +0800 | [diff] [blame] | 2745 | /* |
| 2746 | * Rescan should only search the commit root, and any later difference |
| 2747 | * should be recorded by qgroup tracking. |
| 2748 | */ |
| 2749 | path->search_commit_root = 1; |
| 2750 | path->skip_locking = 1; |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2751 | |
| 2752 | err = 0; |
Justin Maggard | 7343dd6 | 2015-11-04 15:56:16 -0800 | [diff] [blame] | 2753 | while (!err && !btrfs_fs_closing(fs_info)) { |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2754 | trans = btrfs_start_transaction(fs_info->fs_root, 0); |
| 2755 | if (IS_ERR(trans)) { |
| 2756 | err = PTR_ERR(trans); |
| 2757 | break; |
| 2758 | } |
Josef Bacik | afcdd12 | 2016-09-02 15:40:02 -0400 | [diff] [blame] | 2759 | if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2760 | err = -EINTR; |
| 2761 | } else { |
Qu Wenruo | 0a0e8b89 | 2015-10-26 09:19:43 +0800 | [diff] [blame] | 2762 | err = qgroup_rescan_leaf(fs_info, path, trans); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2763 | } |
| 2764 | if (err > 0) |
Jeff Mahoney | 3a45bb2 | 2016-09-09 21:39:03 -0400 | [diff] [blame] | 2765 | btrfs_commit_transaction(trans); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2766 | else |
Jeff Mahoney | 3a45bb2 | 2016-09-09 21:39:03 -0400 | [diff] [blame] | 2767 | btrfs_end_transaction(trans); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2768 | } |
| 2769 | |
| 2770 | out: |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2771 | btrfs_free_path(path); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2772 | |
| 2773 | mutex_lock(&fs_info->qgroup_rescan_lock); |
Justin Maggard | 7343dd6 | 2015-11-04 15:56:16 -0800 | [diff] [blame] | 2774 | if (!btrfs_fs_closing(fs_info)) |
| 2775 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2776 | |
Qu Wenruo | 3393168 | 2015-02-27 16:24:24 +0800 | [diff] [blame] | 2777 | if (err > 0 && |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2778 | fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) { |
| 2779 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; |
| 2780 | } else if (err < 0) { |
| 2781 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; |
| 2782 | } |
| 2783 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
| 2784 | |
Qu Wenruo | 53b7cde | 2015-02-27 16:24:25 +0800 | [diff] [blame] | 2785 | /* |
Nicholas D Steeves | 0132761 | 2016-05-19 21:18:45 -0400 | [diff] [blame] | 2786 | * only update status, since the previous part has already updated the |
Qu Wenruo | 53b7cde | 2015-02-27 16:24:25 +0800 | [diff] [blame] | 2787 | * qgroup info. |
| 2788 | */ |
| 2789 | trans = btrfs_start_transaction(fs_info->quota_root, 1); |
| 2790 | if (IS_ERR(trans)) { |
| 2791 | err = PTR_ERR(trans); |
| 2792 | btrfs_err(fs_info, |
David Sterba | 913e153 | 2017-07-13 15:32:18 +0200 | [diff] [blame] | 2793 | "fail to start transaction for status update: %d", |
Qu Wenruo | 53b7cde | 2015-02-27 16:24:25 +0800 | [diff] [blame] | 2794 | err); |
| 2795 | goto done; |
| 2796 | } |
| 2797 | ret = update_qgroup_status_item(trans, fs_info, fs_info->quota_root); |
| 2798 | if (ret < 0) { |
| 2799 | err = ret; |
Jeff Mahoney | ab8d0fc | 2016-09-20 10:05:02 -0400 | [diff] [blame] | 2800 | btrfs_err(fs_info, "fail to update qgroup status: %d", err); |
Qu Wenruo | 53b7cde | 2015-02-27 16:24:25 +0800 | [diff] [blame] | 2801 | } |
Jeff Mahoney | 3a45bb2 | 2016-09-09 21:39:03 -0400 | [diff] [blame] | 2802 | btrfs_end_transaction(trans); |
Qu Wenruo | 53b7cde | 2015-02-27 16:24:25 +0800 | [diff] [blame] | 2803 | |
Justin Maggard | 7343dd6 | 2015-11-04 15:56:16 -0800 | [diff] [blame] | 2804 | if (btrfs_fs_closing(fs_info)) { |
| 2805 | btrfs_info(fs_info, "qgroup scan paused"); |
| 2806 | } else if (err >= 0) { |
Frank Holton | efe120a | 2013-12-20 11:37:06 -0500 | [diff] [blame] | 2807 | btrfs_info(fs_info, "qgroup scan completed%s", |
Qu Wenruo | 3393168 | 2015-02-27 16:24:24 +0800 | [diff] [blame] | 2808 | err > 0 ? " (inconsistency flag cleared)" : ""); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2809 | } else { |
Frank Holton | efe120a | 2013-12-20 11:37:06 -0500 | [diff] [blame] | 2810 | btrfs_err(fs_info, "qgroup scan failed with %d", err); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2811 | } |
Jan Schmidt | 57254b6e | 2013-05-06 19:14:17 +0000 | [diff] [blame] | 2812 | |
Qu Wenruo | 53b7cde | 2015-02-27 16:24:25 +0800 | [diff] [blame] | 2813 | done: |
Jeff Mahoney | d2c609b | 2016-08-15 12:10:33 -0400 | [diff] [blame] | 2814 | mutex_lock(&fs_info->qgroup_rescan_lock); |
| 2815 | fs_info->qgroup_rescan_running = false; |
| 2816 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
Jan Schmidt | 57254b6e | 2013-05-06 19:14:17 +0000 | [diff] [blame] | 2817 | complete_all(&fs_info->qgroup_rescan_completion); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2818 | } |
| 2819 | |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2820 | /* |
| 2821 | * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all |
| 2822 | * memory required for the rescan context. |
| 2823 | */ |
| 2824 | static int |
| 2825 | qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, |
| 2826 | int init_flags) |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2827 | { |
| 2828 | int ret = 0; |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2829 | |
Qu Wenruo | 9593bf49 | 2018-05-02 13:28:03 +0800 | [diff] [blame] | 2830 | if (!init_flags) { |
| 2831 | /* we're resuming qgroup rescan at mount time */ |
Filipe Manana | e4e7ede | 2018-06-27 00:43:15 +0100 | [diff] [blame] | 2832 | if (!(fs_info->qgroup_flags & |
| 2833 | BTRFS_QGROUP_STATUS_FLAG_RESCAN)) { |
Qu Wenruo | 9593bf49 | 2018-05-02 13:28:03 +0800 | [diff] [blame] | 2834 | btrfs_warn(fs_info, |
| 2835 | "qgroup rescan init failed, qgroup is not enabled"); |
Filipe Manana | e4e7ede | 2018-06-27 00:43:15 +0100 | [diff] [blame] | 2836 | ret = -EINVAL; |
| 2837 | } else if (!(fs_info->qgroup_flags & |
| 2838 | BTRFS_QGROUP_STATUS_FLAG_ON)) { |
Qu Wenruo | 9593bf49 | 2018-05-02 13:28:03 +0800 | [diff] [blame] | 2839 | btrfs_warn(fs_info, |
| 2840 | "qgroup rescan init failed, qgroup rescan is not queued"); |
Filipe Manana | e4e7ede | 2018-06-27 00:43:15 +0100 | [diff] [blame] | 2841 | ret = -EINVAL; |
| 2842 | } |
| 2843 | |
| 2844 | if (ret) |
| 2845 | return ret; |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2846 | } |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2847 | |
| 2848 | mutex_lock(&fs_info->qgroup_rescan_lock); |
| 2849 | spin_lock(&fs_info->qgroup_lock); |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2850 | |
| 2851 | if (init_flags) { |
Qu Wenruo | 9593bf49 | 2018-05-02 13:28:03 +0800 | [diff] [blame] | 2852 | if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { |
| 2853 | btrfs_warn(fs_info, |
| 2854 | "qgroup rescan is already in progress"); |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2855 | ret = -EINPROGRESS; |
Qu Wenruo | 9593bf49 | 2018-05-02 13:28:03 +0800 | [diff] [blame] | 2856 | } else if (!(fs_info->qgroup_flags & |
| 2857 | BTRFS_QGROUP_STATUS_FLAG_ON)) { |
| 2858 | btrfs_warn(fs_info, |
| 2859 | "qgroup rescan init failed, qgroup is not enabled"); |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2860 | ret = -EINVAL; |
Qu Wenruo | 9593bf49 | 2018-05-02 13:28:03 +0800 | [diff] [blame] | 2861 | } |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2862 | |
| 2863 | if (ret) { |
| 2864 | spin_unlock(&fs_info->qgroup_lock); |
| 2865 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
Qu Wenruo | 9593bf49 | 2018-05-02 13:28:03 +0800 | [diff] [blame] | 2866 | return ret; |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2867 | } |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2868 | fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN; |
| 2869 | } |
| 2870 | |
| 2871 | memset(&fs_info->qgroup_rescan_progress, 0, |
| 2872 | sizeof(fs_info->qgroup_rescan_progress)); |
| 2873 | fs_info->qgroup_rescan_progress.objectid = progress_objectid; |
Filipe Manana | 190631f | 2015-11-05 10:06:23 +0000 | [diff] [blame] | 2874 | init_completion(&fs_info->qgroup_rescan_completion); |
Filipe Manana | 8d9edda | 2016-11-24 02:09:04 +0000 | [diff] [blame] | 2875 | fs_info->qgroup_rescan_running = true; |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2876 | |
| 2877 | spin_unlock(&fs_info->qgroup_lock); |
| 2878 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
| 2879 | |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2880 | memset(&fs_info->qgroup_rescan_work, 0, |
| 2881 | sizeof(fs_info->qgroup_rescan_work)); |
Qu Wenruo | fc97fab | 2014-02-28 10:46:16 +0800 | [diff] [blame] | 2882 | btrfs_init_work(&fs_info->qgroup_rescan_work, |
Liu Bo | 9e0af23 | 2014-08-15 23:36:53 +0800 | [diff] [blame] | 2883 | btrfs_qgroup_rescan_helper, |
Qu Wenruo | fc97fab | 2014-02-28 10:46:16 +0800 | [diff] [blame] | 2884 | btrfs_qgroup_rescan_worker, NULL, NULL); |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2885 | return 0; |
| 2886 | } |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2887 | |
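/*
 * Illustrative sketch of the two ways qgroup_rescan_init() is driven: a
 * fresh rescan starts from objectid 0 with init_flags set (see
 * btrfs_qgroup_rescan() below), while the mount-time resume path passes
 * the progress saved in the status item and init_flags == 0.  The names
 * below are hypothetical:
 *
 *	ret = qgroup_rescan_init(fs_info, 0, 1);              (fresh start)
 *	ret = qgroup_rescan_init(fs_info, saved_progress, 0); (resume)
 */
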
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2888 | static void |
| 2889 | qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info) |
| 2890 | { |
| 2891 | struct rb_node *n; |
| 2892 | struct btrfs_qgroup *qgroup; |
| 2893 | |
| 2894 | spin_lock(&fs_info->qgroup_lock); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2895 | /* clear all current qgroup tracking information */ |
| 2896 | for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) { |
| 2897 | qgroup = rb_entry(n, struct btrfs_qgroup, node); |
| 2898 | qgroup->rfer = 0; |
| 2899 | qgroup->rfer_cmpr = 0; |
| 2900 | qgroup->excl = 0; |
| 2901 | qgroup->excl_cmpr = 0; |
| 2902 | } |
| 2903 | spin_unlock(&fs_info->qgroup_lock); |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2904 | } |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2905 | |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2906 | int |
| 2907 | btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info) |
| 2908 | { |
| 2909 | int ret = 0; |
| 2910 | struct btrfs_trans_handle *trans; |
| 2911 | |
| 2912 | ret = qgroup_rescan_init(fs_info, 0, 1); |
| 2913 | if (ret) |
| 2914 | return ret; |
| 2915 | |
| 2916 | /* |
| 2917 | * We have set the rescan_progress to 0, which means no more |
| 2918 | * delayed refs will be accounted by btrfs_qgroup_account_ref. |
| 2919 | * However, btrfs_qgroup_account_ref may be right after its call |
| 2920 | * to btrfs_find_all_roots, in which case it would still do the |
| 2921 | * accounting. |
| 2922 | * To solve this, we're committing the transaction, which will |
| 2923 | * ensure we run all delayed refs and only after that, we are |
| 2924 | * going to clear all tracking information for a clean start. |
| 2925 | */ |
| 2926 | |
| 2927 | trans = btrfs_join_transaction(fs_info->fs_root); |
| 2928 | if (IS_ERR(trans)) { |
| 2929 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; |
| 2930 | return PTR_ERR(trans); |
| 2931 | } |
Jeff Mahoney | 3a45bb2 | 2016-09-09 21:39:03 -0400 | [diff] [blame] | 2932 | ret = btrfs_commit_transaction(trans); |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2933 | if (ret) { |
| 2934 | fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN; |
| 2935 | return ret; |
| 2936 | } |
| 2937 | |
| 2938 | qgroup_rescan_zero_tracking(fs_info); |
| 2939 | |
Qu Wenruo | fc97fab | 2014-02-28 10:46:16 +0800 | [diff] [blame] | 2940 | btrfs_queue_work(fs_info->qgroup_rescan_workers, |
| 2941 | &fs_info->qgroup_rescan_work); |
Jan Schmidt | 2f23203 | 2013-04-25 16:04:51 +0000 | [diff] [blame] | 2942 | |
| 2943 | return 0; |
| 2944 | } |
Jan Schmidt | 57254b6e | 2013-05-06 19:14:17 +0000 | [diff] [blame] | 2945 | |
Jeff Mahoney | d06f23d | 2016-08-08 22:08:06 -0400 | [diff] [blame] | 2946 | int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info, |
| 2947 | bool interruptible) |
Jan Schmidt | 57254b6e | 2013-05-06 19:14:17 +0000 | [diff] [blame] | 2948 | { |
| 2949 | int running; |
| 2950 | int ret = 0; |
| 2951 | |
| 2952 | mutex_lock(&fs_info->qgroup_rescan_lock); |
| 2953 | spin_lock(&fs_info->qgroup_lock); |
Jeff Mahoney | d2c609b | 2016-08-15 12:10:33 -0400 | [diff] [blame] | 2954 | running = fs_info->qgroup_rescan_running; |
Jan Schmidt | 57254b6e | 2013-05-06 19:14:17 +0000 | [diff] [blame] | 2955 | spin_unlock(&fs_info->qgroup_lock); |
| 2956 | mutex_unlock(&fs_info->qgroup_rescan_lock); |
| 2957 | |
Jeff Mahoney | d06f23d | 2016-08-08 22:08:06 -0400 | [diff] [blame] | 2958 | if (!running) |
| 2959 | return 0; |
| 2960 | |
| 2961 | if (interruptible) |
Jan Schmidt | 57254b6e | 2013-05-06 19:14:17 +0000 | [diff] [blame] | 2962 | ret = wait_for_completion_interruptible( |
| 2963 | &fs_info->qgroup_rescan_completion); |
Jeff Mahoney | d06f23d | 2016-08-08 22:08:06 -0400 | [diff] [blame] | 2964 | else |
| 2965 | wait_for_completion(&fs_info->qgroup_rescan_completion); |
Jan Schmidt | 57254b6e | 2013-05-06 19:14:17 +0000 | [diff] [blame] | 2966 | |
| 2967 | return ret; |
| 2968 | } |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2969 | |
| 2970 | /* |
| 2971 | * this is only called from open_ctree where we're still single threaded, thus |
| 2972 | * locking is omitted here. |
| 2973 | */ |
| 2974 | void |
| 2975 | btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info) |
| 2976 | { |
| 2977 | if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) |
Qu Wenruo | fc97fab | 2014-02-28 10:46:16 +0800 | [diff] [blame] | 2978 | btrfs_queue_work(fs_info->qgroup_rescan_workers, |
| 2979 | &fs_info->qgroup_rescan_work); |
Jan Schmidt | b382a32 | 2013-05-28 15:47:24 +0000 | [diff] [blame] | 2980 | } |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 2981 | |
| 2982 | /* |
| 2983 | * Reserve qgroup space for range [start, start + len). |
| 2984 | * |
| 2985 | * This function will either reserve space from related qgroups or do |
| 2986 | * nothing if the range is already reserved. |
| 2987 | * |
| 2988 | * Return 0 for successful reserve |
| 2989 | * Return <0 for error (including -EQUOT) |
| 2990 | * |
| 2991 | * NOTE: this function may sleep for memory allocation. |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 2992 | * If btrfs_qgroup_reserve_data() is called multiple times with the |
| 2993 | * same @reserved, the caller must ensure that when an error happens, |
| 2994 | * it is OK to free *ALL* reserved space. |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 2995 | */ |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 2996 | int btrfs_qgroup_reserve_data(struct inode *inode, |
| 2997 | struct extent_changeset **reserved_ret, u64 start, |
| 2998 | u64 len) |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 2999 | { |
| 3000 | struct btrfs_root *root = BTRFS_I(inode)->root; |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 3001 | struct ulist_node *unode; |
| 3002 | struct ulist_iterator uiter; |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 3003 | struct extent_changeset *reserved; |
| 3004 | u64 orig_reserved; |
| 3005 | u64 to_reserve; |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 3006 | int ret; |
| 3007 | |
Josef Bacik | afcdd12 | 2016-09-02 15:40:02 -0400 | [diff] [blame] | 3008 | if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) || |
| 3009 | !is_fstree(root->objectid) || len == 0) |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 3010 | return 0; |
| 3011 | |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 3012 | /* @reserved parameter is mandatory for qgroup */ |
| 3013 | if (WARN_ON(!reserved_ret)) |
| 3014 | return -EINVAL; |
| 3015 | if (!*reserved_ret) { |
| 3016 | *reserved_ret = extent_changeset_alloc(); |
| 3017 | if (!*reserved_ret) |
| 3018 | return -ENOMEM; |
| 3019 | } |
| 3020 | reserved = *reserved_ret; |
| 3021 | /* Record already reserved space */ |
| 3022 | orig_reserved = reserved->bytes_changed; |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 3023 | ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start, |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 3024 | start + len -1, EXTENT_QGROUP_RESERVED, reserved); |
| 3025 | |
| 3026 | /* Newly reserved space */ |
| 3027 | to_reserve = reserved->bytes_changed - orig_reserved; |
Qu Wenruo | 81fb6f7 | 2015-09-28 16:57:53 +0800 | [diff] [blame] | 3028 | trace_btrfs_qgroup_reserve_data(inode, start, len, |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 3029 | to_reserve, QGROUP_RESERVE); |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 3030 | if (ret < 0) |
| 3031 | goto cleanup; |
Qu Wenruo | dba2132 | 2017-12-12 15:34:25 +0800 | [diff] [blame] | 3032 | ret = qgroup_reserve(root, to_reserve, true, BTRFS_QGROUP_RSV_DATA); |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 3033 | if (ret < 0) |
| 3034 | goto cleanup; |
| 3035 | |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 3036 | return ret; |
| 3037 | |
| 3038 | cleanup: |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 3039 | /* cleanup *ALL* already reserved ranges */ |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 3040 | ULIST_ITER_INIT(&uiter); |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 3041 | while ((unode = ulist_next(&reserved->range_changed, &uiter))) |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 3042 | clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val, |
David Sterba | ae0f162 | 2017-10-31 16:37:52 +0100 | [diff] [blame] | 3043 | unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL); |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 3044 | extent_changeset_release(reserved); |
Qu Wenruo | 5247255 | 2015-10-12 16:05:40 +0800 | [diff] [blame] | 3045 | return ret; |
| 3046 | } |
Qu Wenruo | f695fdc | 2015-10-12 16:28:06 +0800 | [diff] [blame] | 3047 | |
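/*
 * Illustrative sketch: a hypothetical caller pattern for
 * btrfs_qgroup_reserve_data() above.  On error the function has already
 * cleared every range recorded in @reserved, so the caller only needs to
 * free the changeset itself (helper not used by the code):
 */
static int qgroup_reserve_data_usage_sketch(struct inode *inode, u64 start,
					    u64 len)
{
	struct extent_changeset *reserved = NULL;
	int ret;

	ret = btrfs_qgroup_reserve_data(inode, &reserved, start, len);
	if (ret < 0) {
		/* all recorded ranges were already cleared internally */
		extent_changeset_free(reserved);	/* NULL-safe */
		return ret;
	}
	/*
	 * A real caller keeps @reserved around until the data hits disk
	 * (btrfs_qgroup_release_data()) or fails (btrfs_qgroup_free_data(),
	 * which takes @reserved); both are further below.
	 */
	extent_changeset_free(reserved);
	return 0;
}
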
Qu Wenruo | bc42bda | 2017-02-27 15:10:39 +0800 | [diff] [blame] | 3048 | /* Free ranges specified by @reserved, normally in error path */ |
| 3049 | static int qgroup_free_reserved_data(struct inode *inode, |
| 3050 | struct extent_changeset *reserved, u64 start, u64 len) |
| 3051 | { |
| 3052 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 3053 | struct ulist_node *unode; |
| 3054 | struct ulist_iterator uiter; |
| 3055 | struct extent_changeset changeset; |
| 3056 | int freed = 0; |
| 3057 | int ret; |
| 3058 | |
| 3059 | extent_changeset_init(&changeset); |
| 3060 | len = round_up(start + len, root->fs_info->sectorsize); |
| 3061 | start = round_down(start, root->fs_info->sectorsize); |
| 3062 | |
| 3063 | ULIST_ITER_INIT(&uiter); |
| 3064 | while ((unode = ulist_next(&reserved->range_changed, &uiter))) { |
| 3065 | u64 range_start = unode->val; |
| 3066 | /* unode->aux is the inclusive end */ |
| 3067 | u64 range_len = unode->aux - range_start + 1; |
| 3068 | u64 free_start; |
| 3069 | u64 free_len; |
| 3070 | |
| 3071 | extent_changeset_release(&changeset); |
| 3072 | |
| 3073 | /* Only free range in range [start, start + len) */ |
| 3074 | if (range_start >= start + len || |
| 3075 | range_start + range_len <= start) |
| 3076 | continue; |
| 3077 | free_start = max(range_start, start); |
| 3078 | free_len = min(start + len, range_start + range_len) - |
| 3079 | free_start; |
| 3080 | /* |
| 3081 | * TODO: Also modify reserved->ranges_reserved to reflect |
| 3082 | * the modification. |
| 3083 | * |
| 3084 | * However, as long as we free qgroup reserved space according to |
| 3085 | * EXTENT_QGROUP_RESERVED, we won't double free. |
| 3086 | * So there is no need to rush. |
| 3087 | */ |
| 3088 | ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, |
| 3089 | free_start, free_start + free_len - 1, |
| 3090 | EXTENT_QGROUP_RESERVED, &changeset); |
| 3091 | if (ret < 0) |
| 3092 | goto out; |
| 3093 | freed += changeset.bytes_changed; |
| 3094 | } |
Qu Wenruo | d4e5c92 | 2017-12-12 15:34:23 +0800 | [diff] [blame] | 3095 | btrfs_qgroup_free_refroot(root->fs_info, root->objectid, freed, |
| 3096 | BTRFS_QGROUP_RSV_DATA); |
Qu Wenruo | bc42bda | 2017-02-27 15:10:39 +0800 | [diff] [blame] | 3097 | ret = freed; |
| 3098 | out: |
| 3099 | extent_changeset_release(&changeset); |
| 3100 | return ret; |
| 3101 | } |
| 3102 | |
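/*
 * Illustrative sketch: the clamping in qgroup_free_reserved_data() above
 * is a plain intersection of the half-open range [start, start + len)
 * with the recorded range [range_start, range_start + range_len)
 * (hypothetical helper, not used by the code):
 */
static inline u64 qgroup_range_intersection_sketch(u64 start, u64 len,
						   u64 range_start,
						   u64 range_len)
{
	u64 lo = max(range_start, start);
	u64 hi = min(start + len, range_start + range_len);

	return hi > lo ? hi - lo : 0;	/* 0 means no overlap */
}
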
| 3103 | static int __btrfs_qgroup_release_data(struct inode *inode, |
| 3104 | struct extent_changeset *reserved, u64 start, u64 len, |
| 3105 | int free) |
Qu Wenruo | f695fdc | 2015-10-12 16:28:06 +0800 | [diff] [blame] | 3106 | { |
| 3107 | struct extent_changeset changeset; |
Qu Wenruo | 81fb6f7 | 2015-09-28 16:57:53 +0800 | [diff] [blame] | 3108 | int trace_op = QGROUP_RELEASE; |
Qu Wenruo | f695fdc | 2015-10-12 16:28:06 +0800 | [diff] [blame] | 3109 | int ret; |
| 3110 | |
Qu Wenruo | bc42bda | 2017-02-27 15:10:39 +0800 | [diff] [blame] | 3111 | /* In release case, we shouldn't have @reserved */ |
| 3112 | WARN_ON(!free && reserved); |
| 3113 | if (free && reserved) |
| 3114 | return qgroup_free_reserved_data(inode, reserved, start, len); |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 3115 | extent_changeset_init(&changeset); |
Qu Wenruo | f695fdc | 2015-10-12 16:28:06 +0800 | [diff] [blame] | 3116 | ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start, |
David Sterba | f734c44 | 2016-04-26 23:54:39 +0200 | [diff] [blame] | 3117 | start + len -1, EXTENT_QGROUP_RESERVED, &changeset); |
Qu Wenruo | f695fdc | 2015-10-12 16:28:06 +0800 | [diff] [blame] | 3118 | if (ret < 0) |
| 3119 | goto out; |
| 3120 | |
Qu Wenruo | d51ea5d | 2017-03-13 15:52:09 +0800 | [diff] [blame] | 3121 | if (free) |
| 3122 | trace_op = QGROUP_FREE; |
| 3123 | trace_btrfs_qgroup_release_data(inode, start, len, |
| 3124 | changeset.bytes_changed, trace_op); |
| 3125 | if (free) |
David Sterba | 0b08e1f | 2017-02-13 14:24:35 +0100 | [diff] [blame] | 3126 | btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, |
| 3127 | BTRFS_I(inode)->root->objectid, |
Qu Wenruo | d4e5c92 | 2017-12-12 15:34:23 +0800 | [diff] [blame] | 3128 | changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); |
Qu Wenruo | 7bc329c | 2017-02-27 15:10:36 +0800 | [diff] [blame] | 3129 | ret = changeset.bytes_changed; |
Qu Wenruo | f695fdc | 2015-10-12 16:28:06 +0800 | [diff] [blame] | 3130 | out: |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 3131 | extent_changeset_release(&changeset); |
Qu Wenruo | f695fdc | 2015-10-12 16:28:06 +0800 | [diff] [blame] | 3132 | return ret; |
| 3133 | } |
| 3134 | |
| 3135 | /* |
| 3136 | * Free a reserved space range from io_tree and related qgroups |
| 3137 | * |
| 3138 | * Should be called when a range of pages get invalidated before reaching disk. |
| 3139 | * Or for error cleanup case. |
Qu Wenruo | bc42bda | 2017-02-27 15:10:39 +0800 | [diff] [blame] | 3140 | * If @reserved is given, only the reserved range in [@start, @start + @len) |
| 3141 | * will be freed. |
Qu Wenruo | f695fdc | 2015-10-12 16:28:06 +0800 | [diff] [blame] | 3142 | * |
| 3143 | * For data written to disk, use btrfs_qgroup_release_data(). |
| 3144 | * |
| 3145 | * NOTE: This function may sleep for memory allocation. |
| 3146 | */ |
Qu Wenruo | bc42bda | 2017-02-27 15:10:39 +0800 | [diff] [blame] | 3147 | int btrfs_qgroup_free_data(struct inode *inode, |
| 3148 | struct extent_changeset *reserved, u64 start, u64 len) |
Qu Wenruo | f695fdc | 2015-10-12 16:28:06 +0800 | [diff] [blame] | 3149 | { |
Qu Wenruo | bc42bda | 2017-02-27 15:10:39 +0800 | [diff] [blame] | 3150 | return __btrfs_qgroup_release_data(inode, reserved, start, len, 1); |
Qu Wenruo | f695fdc | 2015-10-12 16:28:06 +0800 | [diff] [blame] | 3151 | } |
| 3152 | |
| 3153 | /* |
| 3154 | * Release a reserved space range from io_tree only. |
| 3155 | * |
| 3156 | * Should be called when a range of pages get written to disk and corresponding |
| 3157 | * FILE_EXTENT is inserted into corresponding root. |
| 3158 | * |
| 3159 | * Since the new qgroup accounting framework only updates qgroup numbers |
| 3160 | * at commit_transaction() time, its reserved space shouldn't be freed from |
| 3161 | * related qgroups. |
| 3162 | * |
| 3163 | * But we should release the range from io_tree, to allow further write to be |
| 3164 | * COWed. |
| 3165 | * |
| 3166 | * NOTE: This function may sleep for memory allocation. |
| 3167 | */ |
| 3168 | int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len) |
| 3169 | { |
Qu Wenruo | bc42bda | 2017-02-27 15:10:39 +0800 | [diff] [blame] | 3170 | return __btrfs_qgroup_release_data(inode, NULL, start, len, 0); |
Qu Wenruo | f695fdc | 2015-10-12 16:28:06 +0800 | [diff] [blame] | 3171 | } |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3172 | |
Qu Wenruo | 8287475 | 2017-12-12 15:34:34 +0800 | [diff] [blame] | 3173 | static void add_root_meta_rsv(struct btrfs_root *root, int num_bytes, |
| 3174 | enum btrfs_qgroup_rsv_type type) |
| 3175 | { |
| 3176 | if (type != BTRFS_QGROUP_RSV_META_PREALLOC && |
| 3177 | type != BTRFS_QGROUP_RSV_META_PERTRANS) |
| 3178 | return; |
| 3179 | if (num_bytes == 0) |
| 3180 | return; |
| 3181 | |
| 3182 | spin_lock(&root->qgroup_meta_rsv_lock); |
| 3183 | if (type == BTRFS_QGROUP_RSV_META_PREALLOC) |
| 3184 | root->qgroup_meta_rsv_prealloc += num_bytes; |
| 3185 | else |
| 3186 | root->qgroup_meta_rsv_pertrans += num_bytes; |
| 3187 | spin_unlock(&root->qgroup_meta_rsv_lock); |
| 3188 | } |
| 3189 | |
| 3190 | static int sub_root_meta_rsv(struct btrfs_root *root, int num_bytes, |
| 3191 | enum btrfs_qgroup_rsv_type type) |
| 3192 | { |
| 3193 | if (type != BTRFS_QGROUP_RSV_META_PREALLOC && |
| 3194 | type != BTRFS_QGROUP_RSV_META_PERTRANS) |
| 3195 | return 0; |
| 3196 | if (num_bytes == 0) |
| 3197 | return 0; |
| 3198 | |
| 3199 | spin_lock(&root->qgroup_meta_rsv_lock); |
| 3200 | if (type == BTRFS_QGROUP_RSV_META_PREALLOC) { |
| 3201 | num_bytes = min_t(u64, root->qgroup_meta_rsv_prealloc, |
| 3202 | num_bytes); |
| 3203 | root->qgroup_meta_rsv_prealloc -= num_bytes; |
| 3204 | } else { |
| 3205 | num_bytes = min_t(u64, root->qgroup_meta_rsv_pertrans, |
| 3206 | num_bytes); |
| 3207 | root->qgroup_meta_rsv_pertrans -= num_bytes; |
| 3208 | } |
| 3209 | spin_unlock(&root->qgroup_meta_rsv_lock); |
| 3210 | return num_bytes; |
| 3211 | } |
| 3212 | |
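/*
 * Illustrative sketch: a worked example of the clamping above, with
 * hypothetical numbers.  If only SZ_16K of prealloc is recorded in the
 * root but a caller asks to free SZ_64K (e.g. quota was toggled in
 * between), only 16K is subtracted and returned, so the qgroup release
 * that follows can never underflow (helper not used by the code):
 */
static int sub_root_meta_rsv_example_sketch(struct btrfs_root *root)
{
	/* returns SZ_16K and leaves root->qgroup_meta_rsv_prealloc at 0 */
	return sub_root_meta_rsv(root, SZ_64K,
				 BTRFS_QGROUP_RSV_META_PREALLOC);
}
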
Qu Wenruo | 733e03a | 2017-12-12 15:34:29 +0800 | [diff] [blame] | 3213 | int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes, |
| 3214 | enum btrfs_qgroup_rsv_type type, bool enforce) |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3215 | { |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3216 | struct btrfs_fs_info *fs_info = root->fs_info; |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3217 | int ret; |
| 3218 | |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3219 | if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || |
Josef Bacik | afcdd12 | 2016-09-02 15:40:02 -0400 | [diff] [blame] | 3220 | !is_fstree(root->objectid) || num_bytes == 0) |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3221 | return 0; |
| 3222 | |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3223 | BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); |
Qu Wenruo | 4ee0d88 | 2017-12-12 15:34:35 +0800 | [diff] [blame] | 3224 | trace_qgroup_meta_reserve(root, type, (s64)num_bytes); |
Qu Wenruo | 733e03a | 2017-12-12 15:34:29 +0800 | [diff] [blame] | 3225 | ret = qgroup_reserve(root, num_bytes, enforce, type); |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3226 | if (ret < 0) |
| 3227 | return ret; |
Qu Wenruo | 8287475 | 2017-12-12 15:34:34 +0800 | [diff] [blame] | 3228 | /* |
| 3229 | * Record what we have reserved into root. |
| 3230 | * |
| 3231 | * This avoids underflow across a quota disabled -> enabled transition: |
| 3232 | * in that case, we may try to free space we haven't reserved |
| 3233 | * (since quota was disabled), so record what we reserved into root |
| 3234 | * and ensure a later release won't underflow this number. |
| 3235 | */ |
| 3236 | add_root_meta_rsv(root, num_bytes, type); |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3237 | return ret; |
| 3238 | } |
| 3239 | |
Qu Wenruo | 733e03a | 2017-12-12 15:34:29 +0800 | [diff] [blame] | 3240 | void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root) |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3241 | { |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3242 | struct btrfs_fs_info *fs_info = root->fs_info; |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3243 | |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3244 | if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || |
Josef Bacik | afcdd12 | 2016-09-02 15:40:02 -0400 | [diff] [blame] | 3245 | !is_fstree(root->objectid)) |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3246 | return; |
| 3247 | |
Qu Wenruo | e1211d0 | 2017-12-12 15:34:30 +0800 | [diff] [blame] | 3248 | /* TODO: Update trace point to handle such free */ |
Qu Wenruo | 4ee0d88 | 2017-12-12 15:34:35 +0800 | [diff] [blame] | 3249 | trace_qgroup_meta_free_all_pertrans(root); |
Qu Wenruo | e1211d0 | 2017-12-12 15:34:30 +0800 | [diff] [blame] | 3250 | /* Special value -1 means to free all reserved space */ |
| 3251 | btrfs_qgroup_free_refroot(fs_info, root->objectid, (u64)-1, |
Qu Wenruo | 733e03a | 2017-12-12 15:34:29 +0800 | [diff] [blame] | 3252 | BTRFS_QGROUP_RSV_META_PERTRANS); |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3253 | } |
| 3254 | |
Qu Wenruo | 733e03a | 2017-12-12 15:34:29 +0800 | [diff] [blame] | 3255 | void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes, |
| 3256 | enum btrfs_qgroup_rsv_type type) |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3257 | { |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3258 | struct btrfs_fs_info *fs_info = root->fs_info; |
| 3259 | |
| 3260 | if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || |
Josef Bacik | afcdd12 | 2016-09-02 15:40:02 -0400 | [diff] [blame] | 3261 | !is_fstree(root->objectid)) |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3262 | return; |
| 3263 | |
Qu Wenruo | 8287475 | 2017-12-12 15:34:34 +0800 | [diff] [blame] | 3264 | /* |
| 3265 | * reservation for META_PREALLOC can happen before quota is enabled, |
| 3266 | * which can lead to underflow. |
| 3267 | * Here, ensure we only free what we really have reserved. |
| 3268 | */ |
| 3269 | num_bytes = sub_root_meta_rsv(root, num_bytes, type); |
Jeff Mahoney | 0b246af | 2016-06-22 18:54:23 -0400 | [diff] [blame] | 3270 | BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize)); |
Qu Wenruo | 4ee0d88 | 2017-12-12 15:34:35 +0800 | [diff] [blame] | 3271 | trace_qgroup_meta_reserve(root, type, -(s64)num_bytes); |
Qu Wenruo | 733e03a | 2017-12-12 15:34:29 +0800 | [diff] [blame] | 3272 | btrfs_qgroup_free_refroot(fs_info, root->objectid, num_bytes, type); |
Qu Wenruo | 55eeaf0 | 2015-09-08 17:08:38 +0800 | [diff] [blame] | 3273 | } |
Qu Wenruo | 56fa9d0 | 2015-10-13 09:53:10 +0800 | [diff] [blame] | 3274 | |
Qu Wenruo | 64cfaef | 2017-12-12 15:34:31 +0800 | [diff] [blame] | 3275 | static void qgroup_convert_meta(struct btrfs_fs_info *fs_info, u64 ref_root, |
| 3276 | int num_bytes) |
| 3277 | { |
| 3278 | struct btrfs_root *quota_root = fs_info->quota_root; |
| 3279 | struct btrfs_qgroup *qgroup; |
| 3280 | struct ulist_node *unode; |
| 3281 | struct ulist_iterator uiter; |
| 3282 | int ret = 0; |
| 3283 | |
| 3284 | if (num_bytes == 0) |
| 3285 | return; |
| 3286 | if (!quota_root) |
| 3287 | return; |
| 3288 | |
| 3289 | spin_lock(&fs_info->qgroup_lock); |
| 3290 | qgroup = find_qgroup_rb(fs_info, ref_root); |
| 3291 | if (!qgroup) |
| 3292 | goto out; |
| 3293 | ulist_reinit(fs_info->qgroup_ulist); |
| 3294 | ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid, |
David Sterba | a1840b5 | 2018-03-27 19:04:50 +0200 | [diff] [blame] | 3295 | qgroup_to_aux(qgroup), GFP_ATOMIC); |
Qu Wenruo | 64cfaef | 2017-12-12 15:34:31 +0800 | [diff] [blame] | 3296 | if (ret < 0) |
| 3297 | goto out; |
| 3298 | ULIST_ITER_INIT(&uiter); |
| 3299 | while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) { |
| 3300 | struct btrfs_qgroup *qg; |
| 3301 | struct btrfs_qgroup_list *glist; |
| 3302 | |
| 3303 | qg = unode_aux_to_qgroup(unode); |
| 3304 | |
| 3305 | qgroup_rsv_release(fs_info, qg, num_bytes, |
| 3306 | BTRFS_QGROUP_RSV_META_PREALLOC); |
| 3307 | qgroup_rsv_add(fs_info, qg, num_bytes, |
| 3308 | BTRFS_QGROUP_RSV_META_PERTRANS); |
| 3309 | list_for_each_entry(glist, &qg->groups, next_group) { |
| 3310 | ret = ulist_add(fs_info->qgroup_ulist, |
| 3311 | glist->group->qgroupid, |
David Sterba | a1840b5 | 2018-03-27 19:04:50 +0200 | [diff] [blame] | 3312 | qgroup_to_aux(glist->group), GFP_ATOMIC); |
Qu Wenruo | 64cfaef | 2017-12-12 15:34:31 +0800 | [diff] [blame] | 3313 | if (ret < 0) |
| 3314 | goto out; |
| 3315 | } |
| 3316 | } |
| 3317 | out: |
| 3318 | spin_unlock(&fs_info->qgroup_lock); |
| 3319 | } |
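| | |
| | /* |
| |  * qgroup_rsv_release(), used above as the counterpart of |
| |  * qgroup_rsv_add(), is defined earlier in this file; a sketch of the |
| |  * underflow guard it is assumed to carry (not a verbatim copy): |
| |  * |
| |  *         if (qgroup->rsv.values[type] < num_bytes) { |
| |  *                 WARN_ON(1); |
| |  *                 qgroup->rsv.values[type] = 0; |
| |  *                 return; |
| |  *         } |
| |  *         qgroup->rsv.values[type] -= num_bytes; |
| |  */ |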
| 3320 | |
| 3321 | void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes) |
| 3322 | { |
| 3323 | struct btrfs_fs_info *fs_info = root->fs_info; |
| 3324 | |
| 3325 | if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) || |
| 3326 | !is_fstree(root->objectid)) |
| 3327 | return; |
Qu Wenruo | 8287475 | 2017-12-12 15:34:34 +0800 | [diff] [blame] | 3328 | /* Same underflow protection as in btrfs_qgroup_free_meta_prealloc() */ |
| 3329 | num_bytes = sub_root_meta_rsv(root, num_bytes, |
| 3330 | BTRFS_QGROUP_RSV_META_PREALLOC); |
Qu Wenruo | 4ee0d88 | 2017-12-12 15:34:35 +0800 | [diff] [blame] | 3331 | trace_qgroup_meta_convert(root, num_bytes); |
Qu Wenruo | 64cfaef | 2017-12-12 15:34:31 +0800 | [diff] [blame] | 3332 | qgroup_convert_meta(fs_info, root->objectid, num_bytes); |
| 3333 | } |
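| | |
| | /* |
| |  * Typical caller pattern (an assumed illustration, not taken from a |
| |  * specific call site): metadata space is reserved as META_PREALLOC |
| |  * before the work starts, then converted once the blocks are dirtied |
| |  * in the current transaction so it is released at commit time: |
| |  * |
| |  *         btrfs_qgroup_reserve_meta_prealloc(root, nbytes, true); |
| |  *         ... join transaction and COW the tree blocks ... |
| |  *         btrfs_qgroup_convert_reserved_meta(root, nbytes); |
| |  */ |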
| 3334 | |
Qu Wenruo | 56fa9d0 | 2015-10-13 09:53:10 +0800 | [diff] [blame] | 3335 | /* |
Nicholas D Steeves | 0132761 | 2016-05-19 21:18:45 -0400 | [diff] [blame] | 3336 | * Check for leaked qgroup reserved space, normally at inode |
Qu Wenruo | 56fa9d0 | 2015-10-13 09:53:10 +0800 | [diff] [blame] | 3337 | * destroy time |
| 3338 | */ |
| 3339 | void btrfs_qgroup_check_reserved_leak(struct inode *inode) |
| 3340 | { |
| 3341 | struct extent_changeset changeset; |
| 3342 | struct ulist_node *unode; |
| 3343 | struct ulist_iterator iter; |
| 3344 | int ret; |
| 3345 | |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 3346 | extent_changeset_init(&changeset); |
Qu Wenruo | 56fa9d0 | 2015-10-13 09:53:10 +0800 | [diff] [blame] | 3347 | ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1, |
David Sterba | f734c44 | 2016-04-26 23:54:39 +0200 | [diff] [blame] | 3348 | EXTENT_QGROUP_RESERVED, &changeset); |
Qu Wenruo | 56fa9d0 | 2015-10-13 09:53:10 +0800 | [diff] [blame] | 3349 | |
| 3350 | WARN_ON(ret < 0); |
| 3351 | if (WARN_ON(changeset.bytes_changed)) { |
| 3352 | ULIST_ITER_INIT(&iter); |
David Sterba | 53d3235 | 2017-02-13 13:42:29 +0100 | [diff] [blame] | 3353 | while ((unode = ulist_next(&changeset.range_changed, &iter))) { |
Qu Wenruo | 56fa9d0 | 2015-10-13 09:53:10 +0800 | [diff] [blame] | 3354 | btrfs_warn(BTRFS_I(inode)->root->fs_info, |
| 3355 | "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu", |
| 3356 | inode->i_ino, unode->val, unode->aux); |
| 3357 | } |
David Sterba | 0b08e1f | 2017-02-13 14:24:35 +0100 | [diff] [blame] | 3358 | btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info, |
| 3359 | BTRFS_I(inode)->root->objectid, |
Qu Wenruo | d4e5c92 | 2017-12-12 15:34:23 +0800 | [diff] [blame] | 3360 | changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA); |
David Sterba | 0b08e1f | 2017-02-13 14:24:35 +0100 | [diff] [blame] | 3361 | |
Qu Wenruo | 56fa9d0 | 2015-10-13 09:53:10 +0800 | [diff] [blame] | 3362 | } |
Qu Wenruo | 364ecf3 | 2017-02-27 15:10:38 +0800 | [diff] [blame] | 3363 | extent_changeset_release(&changeset); |
Qu Wenruo | 56fa9d0 | 2015-10-13 09:53:10 +0800 | [diff] [blame] | 3364 | } |
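| | |
| | /* |
| |  * This check is expected to run from the inode teardown path (e.g. |
| |  * btrfs_destroy_inode()), where any range still flagged |
| |  * EXTENT_QGROUP_RESERVED can only be a reservation that was never |
| |  * freed, hence the warning and the forced free above. |
| |  */ |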