Josef Bacik | 8673634 | 2019-06-19 15:12:00 -0400 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | |
| 3 | #include "ctree.h" |
| 4 | #include "delalloc-space.h" |
| 5 | #include "block-rsv.h" |
| 6 | #include "btrfs_inode.h" |
| 7 | #include "space-info.h" |
| 8 | #include "transaction.h" |
| 9 | #include "qgroup.h" |
Josef Bacik | 07730d8 | 2019-06-20 15:38:04 -0400 | [diff] [blame] | 10 | #include "block-group.h" |
Josef Bacik | 8673634 | 2019-06-19 15:12:00 -0400 | [diff] [blame] | 11 | |
/*
 * btrfs_alloc_data_chunk_ondemand - reserve space in the data space_info
 * @inode: the inode the reservation is for
 * @bytes: number of bytes to reserve; rounded up to sectorsize
 *
 * Accounts @bytes as bytes_may_use in the data space_info.  If there is not
 * enough free space this first tries to allocate a new data chunk, then
 * falls back to flushing delalloc and committing the transaction (up to two
 * commit cycles) to reclaim pinned space.
 *
 * Returns 0 on success, -ENOSPC if no space could be made available, or
 * another negative errno from joining/committing the transaction.
 */
int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	u64 used;
	int ret = 0;
	/* Allow up to two commit attempts: flush+commit, then commit again. */
	int need_commit = 2;
	int have_pinned_space;

	/* Make sure bytes are sectorsize aligned */
	bytes = ALIGN(bytes, fs_info->sectorsize);

	/*
	 * The free space inode reserves from within a transaction context
	 * (asserted below), so we must never try to commit for it here or
	 * we would deadlock.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		need_commit = 0;
		ASSERT(current->journal_info);
	}

again:
	/* Make sure we have enough space to handle the data first */
	spin_lock(&data_sinfo->lock);
	used = btrfs_space_info_used(data_sinfo, true);

	if (used + bytes > data_sinfo->total_bytes) {
		struct btrfs_trans_handle *trans;

		/*
		 * If we don't have enough free bytes in this space then we need
		 * to alloc a new chunk.
		 */
		if (!data_sinfo->full) {
			u64 alloc_target;

			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
			spin_unlock(&data_sinfo->lock);

			alloc_target = btrfs_data_alloc_profile(fs_info);
			/*
			 * It is ugly that we don't call nolock join
			 * transaction for the free space inode case here.
			 * But it is safe because we only do the data space
			 * reservation for the free space cache in the
			 * transaction context, the common join transaction
			 * just increase the counter of the current transaction
			 * handler, doesn't try to acquire the trans_lock of
			 * the fs.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = btrfs_chunk_alloc(trans, alloc_target,
						CHUNK_ALLOC_NO_FORCE);
			btrfs_end_transaction(trans);
			if (ret < 0) {
				if (ret != -ENOSPC)
					return ret;
				else {
					/*
					 * Chunk allocation failed for lack of
					 * space; a commit may still release
					 * enough pinned bytes to satisfy us.
					 */
					have_pinned_space = 1;
					goto commit_trans;
				}
			}

			goto again;
		}

		/*
		 * If we don't have enough pinned space to deal with this
		 * allocation, and no removed chunk in current transaction,
		 * don't bother committing the transaction.
		 */
		have_pinned_space = __percpu_counter_compare(
			&data_sinfo->total_bytes_pinned,
			used + bytes - data_sinfo->total_bytes,
			BTRFS_TOTAL_BYTES_PINNED_BATCH);
		spin_unlock(&data_sinfo->lock);

		/* Commit the current transaction and try again */
commit_trans:
		if (need_commit) {
			need_commit--;

			if (need_commit > 0) {
				/*
				 * First cycle only: flush all delalloc and
				 * wait for ordered extents to finish so the
				 * commit releases as much space as possible.
				 */
				btrfs_start_delalloc_roots(fs_info, -1);
				btrfs_wait_ordered_roots(fs_info, U64_MAX, 0,
							 (u64)-1);
			}

			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);
			if (have_pinned_space >= 0 ||
			    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
				     &trans->transaction->flags) ||
			    need_commit > 0) {
				ret = btrfs_commit_transaction(trans);
				if (ret)
					return ret;
				/*
				 * The cleaner kthread might still be doing iput
				 * operations. Wait for it to finish so that
				 * more space is released. We don't need to
				 * explicitly run the delayed iputs here because
				 * the commit_transaction would have woken up
				 * the cleaner.
				 */
				ret = btrfs_wait_on_delayed_iputs(fs_info);
				if (ret)
					return ret;
				goto again;
			} else {
				btrfs_end_transaction(trans);
			}
		}

		trace_btrfs_space_reservation(fs_info,
					      "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		return -ENOSPC;
	}
	btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, bytes);
	trace_btrfs_space_reservation(fs_info, "space_info",
				      data_sinfo->flags, bytes, 1);
	spin_unlock(&data_sinfo->lock);

	return 0;
}
| 139 | |
| 140 | int btrfs_check_data_free_space(struct inode *inode, |
| 141 | struct extent_changeset **reserved, u64 start, u64 len) |
| 142 | { |
| 143 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| 144 | int ret; |
| 145 | |
| 146 | /* align the range */ |
| 147 | len = round_up(start + len, fs_info->sectorsize) - |
| 148 | round_down(start, fs_info->sectorsize); |
| 149 | start = round_down(start, fs_info->sectorsize); |
| 150 | |
| 151 | ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len); |
| 152 | if (ret < 0) |
| 153 | return ret; |
| 154 | |
| 155 | /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */ |
| 156 | ret = btrfs_qgroup_reserve_data(inode, reserved, start, len); |
| 157 | if (ret < 0) |
| 158 | btrfs_free_reserved_data_space_noquota(inode, start, len); |
| 159 | else |
| 160 | ret = 0; |
| 161 | return ret; |
| 162 | } |
| 163 | |
| 164 | /* |
| 165 | * Called if we need to clear a data reservation for this inode |
| 166 | * Normally in a error case. |
| 167 | * |
| 168 | * This one will *NOT* use accurate qgroup reserved space API, just for case |
| 169 | * which we can't sleep and is sure it won't affect qgroup reserved space. |
| 170 | * Like clear_bit_hook(). |
| 171 | */ |
| 172 | void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, |
| 173 | u64 len) |
| 174 | { |
| 175 | struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); |
| 176 | struct btrfs_space_info *data_sinfo; |
| 177 | |
| 178 | /* Make sure the range is aligned to sectorsize */ |
| 179 | len = round_up(start + len, fs_info->sectorsize) - |
| 180 | round_down(start, fs_info->sectorsize); |
| 181 | start = round_down(start, fs_info->sectorsize); |
| 182 | |
| 183 | data_sinfo = fs_info->data_sinfo; |
| 184 | spin_lock(&data_sinfo->lock); |
| 185 | btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, -len); |
| 186 | trace_btrfs_space_reservation(fs_info, "space_info", |
| 187 | data_sinfo->flags, len, 0); |
| 188 | spin_unlock(&data_sinfo->lock); |
| 189 | } |
| 190 | |
| 191 | /* |
| 192 | * Called if we need to clear a data reservation for this inode |
| 193 | * Normally in a error case. |
| 194 | * |
| 195 | * This one will handle the per-inode data rsv map for accurate reserved |
| 196 | * space framework. |
| 197 | */ |
| 198 | void btrfs_free_reserved_data_space(struct inode *inode, |
| 199 | struct extent_changeset *reserved, u64 start, u64 len) |
| 200 | { |
| 201 | struct btrfs_root *root = BTRFS_I(inode)->root; |
| 202 | |
| 203 | /* Make sure the range is aligned to sectorsize */ |
| 204 | len = round_up(start + len, root->fs_info->sectorsize) - |
| 205 | round_down(start, root->fs_info->sectorsize); |
| 206 | start = round_down(start, root->fs_info->sectorsize); |
| 207 | |
| 208 | btrfs_free_reserved_data_space_noquota(inode, start, len); |
| 209 | btrfs_qgroup_free_data(inode, reserved, start, len); |
| 210 | } |
| 211 | |
| 212 | /** |
| 213 | * btrfs_inode_rsv_release - release any excessive reservation. |
| 214 | * @inode - the inode we need to release from. |
| 215 | * @qgroup_free - free or convert qgroup meta. |
| 216 | * Unlike normal operation, qgroup meta reservation needs to know if we are |
| 217 | * freeing qgroup reservation or just converting it into per-trans. Normally |
| 218 | * @qgroup_free is true for error handling, and false for normal release. |
| 219 | * |
| 220 | * This is the same as btrfs_block_rsv_release, except that it handles the |
| 221 | * tracepoint for the reservation. |
| 222 | */ |
| 223 | static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free) |
| 224 | { |
| 225 | struct btrfs_fs_info *fs_info = inode->root->fs_info; |
| 226 | struct btrfs_block_rsv *block_rsv = &inode->block_rsv; |
| 227 | u64 released = 0; |
| 228 | u64 qgroup_to_release = 0; |
| 229 | |
| 230 | /* |
| 231 | * Since we statically set the block_rsv->size we just want to say we |
| 232 | * are releasing 0 bytes, and then we'll just get the reservation over |
| 233 | * the size free'd. |
| 234 | */ |
| 235 | released = __btrfs_block_rsv_release(fs_info, block_rsv, 0, |
| 236 | &qgroup_to_release); |
| 237 | if (released > 0) |
| 238 | trace_btrfs_space_reservation(fs_info, "delalloc", |
| 239 | btrfs_ino(inode), released, 0); |
| 240 | if (qgroup_free) |
| 241 | btrfs_qgroup_free_meta_prealloc(inode->root, qgroup_to_release); |
| 242 | else |
| 243 | btrfs_qgroup_convert_reserved_meta(inode->root, |
| 244 | qgroup_to_release); |
| 245 | } |
| 246 | |
| 247 | static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, |
| 248 | struct btrfs_inode *inode) |
| 249 | { |
| 250 | struct btrfs_block_rsv *block_rsv = &inode->block_rsv; |
| 251 | u64 reserve_size = 0; |
| 252 | u64 qgroup_rsv_size = 0; |
| 253 | u64 csum_leaves; |
| 254 | unsigned outstanding_extents; |
| 255 | |
| 256 | lockdep_assert_held(&inode->lock); |
| 257 | outstanding_extents = inode->outstanding_extents; |
| 258 | if (outstanding_extents) |
Josef Bacik | 2bd36e7 | 2019-08-22 15:14:33 -0400 | [diff] [blame^] | 259 | reserve_size = btrfs_calc_insert_metadata_size(fs_info, |
Josef Bacik | 8673634 | 2019-06-19 15:12:00 -0400 | [diff] [blame] | 260 | outstanding_extents + 1); |
| 261 | csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, |
| 262 | inode->csum_bytes); |
Josef Bacik | 2bd36e7 | 2019-08-22 15:14:33 -0400 | [diff] [blame^] | 263 | reserve_size += btrfs_calc_insert_metadata_size(fs_info, |
| 264 | csum_leaves); |
Josef Bacik | 8673634 | 2019-06-19 15:12:00 -0400 | [diff] [blame] | 265 | /* |
| 266 | * For qgroup rsv, the calculation is very simple: |
| 267 | * account one nodesize for each outstanding extent |
| 268 | * |
| 269 | * This is overestimating in most cases. |
| 270 | */ |
| 271 | qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize; |
| 272 | |
| 273 | spin_lock(&block_rsv->lock); |
| 274 | block_rsv->size = reserve_size; |
| 275 | block_rsv->qgroup_rsv_size = qgroup_rsv_size; |
| 276 | spin_unlock(&block_rsv->lock); |
| 277 | } |
| 278 | |
| 279 | static void calc_inode_reservations(struct btrfs_fs_info *fs_info, |
| 280 | u64 num_bytes, u64 *meta_reserve, |
| 281 | u64 *qgroup_reserve) |
| 282 | { |
| 283 | u64 nr_extents = count_max_extents(num_bytes); |
| 284 | u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, num_bytes); |
| 285 | |
| 286 | /* We add one for the inode update at finish ordered time */ |
Josef Bacik | 2bd36e7 | 2019-08-22 15:14:33 -0400 | [diff] [blame^] | 287 | *meta_reserve = btrfs_calc_insert_metadata_size(fs_info, |
Josef Bacik | 8673634 | 2019-06-19 15:12:00 -0400 | [diff] [blame] | 288 | nr_extents + csum_leaves + 1); |
| 289 | *qgroup_reserve = nr_extents * fs_info->nodesize; |
| 290 | } |
| 291 | |
/*
 * btrfs_delalloc_reserve_metadata - reserve metadata space for a delalloc write
 * @inode: the inode we are writing to
 * @num_bytes: number of data bytes being written; rounded up to sectorsize
 *
 * Reserves enough metadata space in the inode's block_rsv to cover the
 * worst-case extent items and csum leaves that @num_bytes of new delalloc
 * may require, along with the matching qgroup meta reservation.
 *
 * Returns 0 on success or a negative errno; on failure nothing is left
 * reserved.
 */
int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv = &inode->block_rsv;
	u64 meta_reserve, qgroup_reserve;
	unsigned nr_extents;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
	int ret = 0;
	bool delalloc_lock = true;

	/*
	 * If we are a free space inode we need to not flush since we will be in
	 * the middle of a transaction commit. We also don't need the delalloc
	 * mutex since we won't race with anybody. We need this mostly to make
	 * lockdep shut its filthy mouth.
	 *
	 * If we have a transaction open (can happen if we call truncate_block
	 * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		flush = BTRFS_RESERVE_NO_FLUSH;
		delalloc_lock = false;
	} else {
		if (current->journal_info)
			flush = BTRFS_RESERVE_FLUSH_LIMIT;

		/* Back off briefly while a commit is in progress. */
		if (btrfs_transaction_in_commit(fs_info))
			schedule_timeout(1);
	}

	if (delalloc_lock)
		mutex_lock(&inode->delalloc_mutex);

	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);

	/*
	 * We always want to do it this way, every other way is wrong and ends
	 * in tears. Pre-reserving the amount we are going to add will always
	 * be the right way, because otherwise if we have enough parallelism we
	 * could end up with thousands of inodes all holding little bits of
	 * reservations they were able to make previously and the only way to
	 * reclaim that space is to ENOSPC out the operations and clear
	 * everything out and try again, which is bad. This way we just
	 * over-reserve slightly, and clean up the mess when we are done.
	 */
	calc_inode_reservations(fs_info, num_bytes, &meta_reserve,
				&qgroup_reserve);
	ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true);
	if (ret)
		goto out_fail;
	ret = btrfs_reserve_metadata_bytes(root, block_rsv, meta_reserve, flush);
	if (ret)
		goto out_qgroup;

	/*
	 * Now we need to update our outstanding extents and csum bytes _first_
	 * and then add the reservation to the block_rsv. This keeps us from
	 * racing with an ordered completion or some such that would think it
	 * needs to free the reservation we just made.
	 */
	spin_lock(&inode->lock);
	nr_extents = count_max_extents(num_bytes);
	btrfs_mod_outstanding_extents(inode, nr_extents);
	inode->csum_bytes += num_bytes;
	btrfs_calculate_inode_block_rsv_size(fs_info, inode);
	spin_unlock(&inode->lock);

	/* Now we can safely add our space to our block rsv */
	btrfs_block_rsv_add_bytes(block_rsv, meta_reserve, false);
	trace_btrfs_space_reservation(root->fs_info, "delalloc",
				      btrfs_ino(inode), meta_reserve, 1);

	spin_lock(&block_rsv->lock);
	block_rsv->qgroup_rsv_reserved += qgroup_reserve;
	spin_unlock(&block_rsv->lock);

	if (delalloc_lock)
		mutex_unlock(&inode->delalloc_mutex);
	return 0;
out_qgroup:
	btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
out_fail:
	/* Drop anything the failed reservation may have left behind. */
	btrfs_inode_rsv_release(inode, true);
	if (delalloc_lock)
		mutex_unlock(&inode->delalloc_mutex);
	return ret;
}
| 380 | |
| 381 | /** |
| 382 | * btrfs_delalloc_release_metadata - release a metadata reservation for an inode |
| 383 | * @inode: the inode to release the reservation for. |
| 384 | * @num_bytes: the number of bytes we are releasing. |
| 385 | * @qgroup_free: free qgroup reservation or convert it to per-trans reservation |
| 386 | * |
| 387 | * This will release the metadata reservation for an inode. This can be called |
| 388 | * once we complete IO for a given set of bytes to release their metadata |
| 389 | * reservations, or on error for the same reason. |
| 390 | */ |
| 391 | void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes, |
| 392 | bool qgroup_free) |
| 393 | { |
| 394 | struct btrfs_fs_info *fs_info = inode->root->fs_info; |
| 395 | |
| 396 | num_bytes = ALIGN(num_bytes, fs_info->sectorsize); |
| 397 | spin_lock(&inode->lock); |
| 398 | inode->csum_bytes -= num_bytes; |
| 399 | btrfs_calculate_inode_block_rsv_size(fs_info, inode); |
| 400 | spin_unlock(&inode->lock); |
| 401 | |
| 402 | if (btrfs_is_testing(fs_info)) |
| 403 | return; |
| 404 | |
| 405 | btrfs_inode_rsv_release(inode, qgroup_free); |
| 406 | } |
| 407 | |
| 408 | /** |
| 409 | * btrfs_delalloc_release_extents - release our outstanding_extents |
| 410 | * @inode: the inode to balance the reservation for. |
| 411 | * @num_bytes: the number of bytes we originally reserved with |
| 412 | * @qgroup_free: do we need to free qgroup meta reservation or convert them. |
| 413 | * |
| 414 | * When we reserve space we increase outstanding_extents for the extents we may |
| 415 | * add. Once we've set the range as delalloc or created our ordered extents we |
| 416 | * have outstanding_extents to track the real usage, so we use this to free our |
| 417 | * temporarily tracked outstanding_extents. This _must_ be used in conjunction |
| 418 | * with btrfs_delalloc_reserve_metadata. |
| 419 | */ |
| 420 | void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes, |
| 421 | bool qgroup_free) |
| 422 | { |
| 423 | struct btrfs_fs_info *fs_info = inode->root->fs_info; |
| 424 | unsigned num_extents; |
| 425 | |
| 426 | spin_lock(&inode->lock); |
| 427 | num_extents = count_max_extents(num_bytes); |
| 428 | btrfs_mod_outstanding_extents(inode, -num_extents); |
| 429 | btrfs_calculate_inode_block_rsv_size(fs_info, inode); |
| 430 | spin_unlock(&inode->lock); |
| 431 | |
| 432 | if (btrfs_is_testing(fs_info)) |
| 433 | return; |
| 434 | |
| 435 | btrfs_inode_rsv_release(inode, qgroup_free); |
| 436 | } |
| 437 | |
| 438 | /** |
| 439 | * btrfs_delalloc_reserve_space - reserve data and metadata space for |
| 440 | * delalloc |
| 441 | * @inode: inode we're writing to |
| 442 | * @start: start range we are writing to |
| 443 | * @len: how long the range we are writing to |
| 444 | * @reserved: mandatory parameter, record actually reserved qgroup ranges of |
| 445 | * current reservation. |
| 446 | * |
| 447 | * This will do the following things |
| 448 | * |
| 449 | * - reserve space in data space info for num bytes |
| 450 | * and reserve precious corresponding qgroup space |
| 451 | * (Done in check_data_free_space) |
| 452 | * |
| 453 | * - reserve space for metadata space, based on the number of outstanding |
| 454 | * extents and how much csums will be needed |
| 455 | * also reserve metadata space in a per root over-reserve method. |
| 456 | * - add to the inodes->delalloc_bytes |
| 457 | * - add it to the fs_info's delalloc inodes list. |
| 458 | * (Above 3 all done in delalloc_reserve_metadata) |
| 459 | * |
| 460 | * Return 0 for success |
| 461 | * Return <0 for error(-ENOSPC or -EQUOT) |
| 462 | */ |
| 463 | int btrfs_delalloc_reserve_space(struct inode *inode, |
| 464 | struct extent_changeset **reserved, u64 start, u64 len) |
| 465 | { |
| 466 | int ret; |
| 467 | |
| 468 | ret = btrfs_check_data_free_space(inode, reserved, start, len); |
| 469 | if (ret < 0) |
| 470 | return ret; |
| 471 | ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len); |
| 472 | if (ret < 0) |
| 473 | btrfs_free_reserved_data_space(inode, *reserved, start, len); |
| 474 | return ret; |
| 475 | } |
| 476 | |
| 477 | /** |
| 478 | * btrfs_delalloc_release_space - release data and metadata space for delalloc |
| 479 | * @inode: inode we're releasing space for |
| 480 | * @start: start position of the space already reserved |
| 481 | * @len: the len of the space already reserved |
| 482 | * @release_bytes: the len of the space we consumed or didn't use |
| 483 | * |
| 484 | * This function will release the metadata space that was not used and will |
| 485 | * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes |
| 486 | * list if there are no delalloc bytes left. |
| 487 | * Also it will handle the qgroup reserved space. |
| 488 | */ |
| 489 | void btrfs_delalloc_release_space(struct inode *inode, |
| 490 | struct extent_changeset *reserved, |
| 491 | u64 start, u64 len, bool qgroup_free) |
| 492 | { |
| 493 | btrfs_delalloc_release_metadata(BTRFS_I(inode), len, qgroup_free); |
| 494 | btrfs_free_reserved_data_space(inode, reserved, start, len); |
| 495 | } |