David Sterbac1d7c512018-04-03 19:23:33 +02001// SPDX-License-Identifier: GPL-2.0
Chris Mason6cbd5572007-06-12 09:07:21 -04002/*
3 * Copyright (C) 2007 Oracle. All rights reserved.
Chris Mason6cbd5572007-06-12 09:07:21 -04004 */
5
Chris Mason065631f2008-02-20 12:07:25 -05006#include <linux/bio.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +09007#include <linux/slab.h>
Chris Mason065631f2008-02-20 12:07:25 -05008#include <linux/pagemap.h>
9#include <linux/highmem.h>
Nikolay Borisova3d46ae2019-04-01 11:29:58 +030010#include <linux/sched/mm.h>
Johannes Thumshirnd5178572019-06-03 16:58:57 +020011#include <crypto/hash.h>
Chris Mason1e1d2702007-03-15 19:03:33 -040012#include "ctree.h"
Chris Masondee26a92007-03-26 16:00:06 -040013#include "disk-io.h"
Chris Mason9f5fae22007-03-20 14:38:32 -040014#include "transaction.h"
Miao Xiefacc8a222013-07-25 19:22:34 +080015#include "volumes.h"
Chris Mason1de037a2007-05-29 15:17:08 -040016#include "print-tree.h"
Anand Jainebb87652016-03-10 17:26:59 +080017#include "compression.h"
Chris Mason1e1d2702007-03-15 19:03:33 -040018
Chris Mason42049bf2016-08-03 14:05:46 -070019#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
20 sizeof(struct btrfs_item) * 2) / \
21 size) - 1))
Yan Zheng07d400a2009-01-06 11:42:00 -050022
Zach Brown221b8312012-09-20 14:33:00 -060023#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +030024 PAGE_SIZE))
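
/*
 * Rough worked example (not from the original source, figures approximate):
 * with a 16KiB nodesize and 4 byte crc32c checksums, BTRFS_LEAF_DATA_SIZE is
 * a bit under 16KiB, so __MAX_CSUM_ITEMS works out to roughly four thousand
 * checksums.  MAX_CSUM_ITEMS then clamps that against PAGE_SIZE, so a single
 * csum item describes at most a few thousand sectors, i.e. on the order of
 * 16MiB of data with 4KiB sectors.
 */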
Chris Mason7ca4be42012-01-31 20:19:02 -050025
Josef Bacik41a2ee72020-01-17 09:02:21 -050026/**
27 * @inode - the inode we want to update the disk_i_size for
28 * @new_i_size - the i_size we want to set to, or 0 to use i_size_read()
29 *
30 * With NO_HOLES set this simply sets the disk_i_size to whatever i_size_read()
31 * returns as it is perfectly fine with a file that has holes without hole file
32 * extent items.
33 *
34 * However, without NO_HOLES we only want to advance disk_i_size over the area
35 * that is contiguous from offset 0 of the file. Otherwise we could end up
36 * adjusting i_size up to an extent that has a gap in between.
37 *
38 * Finally new_i_size should only be set in the case of truncate where we're not
39 * ready to use i_size_read() as the limiter yet.
40 */
Nikolay Borisov76aea532020-11-02 16:48:53 +020041void btrfs_inode_safe_disk_i_size_write(struct btrfs_inode *inode, u64 new_i_size)
Josef Bacik41a2ee72020-01-17 09:02:21 -050042{
Nikolay Borisov76aea532020-11-02 16:48:53 +020043 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Josef Bacik41a2ee72020-01-17 09:02:21 -050044 u64 start, end, i_size;
45 int ret;
46
Nikolay Borisov76aea532020-11-02 16:48:53 +020047 i_size = new_i_size ?: i_size_read(&inode->vfs_inode);
Josef Bacik41a2ee72020-01-17 09:02:21 -050048 if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
Nikolay Borisov76aea532020-11-02 16:48:53 +020049 inode->disk_i_size = i_size;
Josef Bacik41a2ee72020-01-17 09:02:21 -050050 return;
51 }
52
Nikolay Borisov76aea532020-11-02 16:48:53 +020053 spin_lock(&inode->lock);
54 ret = find_contiguous_extent_bit(&inode->file_extent_tree, 0, &start,
55 &end, EXTENT_DIRTY);
Josef Bacik41a2ee72020-01-17 09:02:21 -050056 if (!ret && start == 0)
57 i_size = min(i_size, end + 1);
58 else
59 i_size = 0;
Nikolay Borisov76aea532020-11-02 16:48:53 +020060 inode->disk_i_size = i_size;
61 spin_unlock(&inode->lock);
Josef Bacik41a2ee72020-01-17 09:02:21 -050062}
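
/*
 * Illustrative example of the logic above (values invented): if the
 * file_extent_tree holds EXTENT_DIRTY ranges [0, 64K) and [128K, 192K) and
 * i_size_read() reports 200K, then without NO_HOLES the contiguous-from-zero
 * lookup ends at 64K and disk_i_size becomes 64K; with NO_HOLES it would
 * simply become 200K.
 */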
63
64/**
65 * @inode - the inode we're modifying
66 * @start - the start file offset of the file extent we've inserted
67 * @len - the logical length of the file extent item
68 *
69 * Call this when we are inserting a new file extent where there was none before.
70 * It does not need to be called when we're replacing an existing file
71 * extent; however, if not sure, it's fine to call this multiple times.
72 *
73 * The start and len must match the file extent item, and thus must be
74 * sectorsize aligned.
75 */
76int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start,
77 u64 len)
78{
79 if (len == 0)
80 return 0;
81
82 ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize));
83
84 if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
85 return 0;
86 return set_extent_bits(&inode->file_extent_tree, start, start + len - 1,
87 EXTENT_DIRTY);
88}
89
90/**
91 * @inode - the inode we're modifying
92 * @start - the start file offset of the file extent we've inserted
93 * @len - the logical length of the file extent item
94 *
95 * Called when we drop a file extent, for example when we truncate. Doesn't
96 * need to be called for cases where we're replacing a file extent, like when
97 * we've COWed a file extent.
98 *
99 * The start and len must match the file extent item, and thus must be
100 * sectorsize aligned.
101 */
102int btrfs_inode_clear_file_extent_range(struct btrfs_inode *inode, u64 start,
103 u64 len)
104{
105 if (len == 0)
106 return 0;
107
108 ASSERT(IS_ALIGNED(start + len, inode->root->fs_info->sectorsize) ||
109 len == (u64)-1);
110
111 if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES))
112 return 0;
113 return clear_extent_bit(&inode->file_extent_tree, start,
114 start + len - 1, EXTENT_DIRTY, 0, 0, NULL);
115}
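
/*
 * Minimal caller sketch (hypothetical, not part of btrfs): after inserting a
 * sectorsize aligned file extent item, record the range in the
 * file_extent_tree and then let disk_i_size advance.  Only
 * btrfs_inode_set_file_extent_range() and btrfs_inode_safe_disk_i_size_write()
 * are real; the wrapper name and its error handling are invented for
 * illustration.
 */
static inline int example_record_new_extent(struct btrfs_inode *inode,
					    u64 start, u64 len)
{
	int ret;

	/* Mark the range as covered by a file extent item. */
	ret = btrfs_inode_set_file_extent_range(inode, start, len);
	if (ret)
		return ret;

	/* Advance disk_i_size as far as the contiguous coverage allows. */
	btrfs_inode_safe_disk_i_size_write(inode, 0);
	return 0;
}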
116
Johannes Thumshirn1e25a2e2019-05-22 10:19:01 +0200117static inline u32 max_ordered_sum_bytes(struct btrfs_fs_info *fs_info,
118 u16 csum_size)
119{
120 u32 ncsums = (PAGE_SIZE - sizeof(struct btrfs_ordered_sum)) / csum_size;
121
122 return ncsums * fs_info->sectorsize;
123}
Yan Zheng07d400a2009-01-06 11:42:00 -0500124
Chris Masonb18c6682007-04-17 13:26:50 -0400125int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
Sage Weilf2eb0a22008-05-02 14:43:14 -0400126 struct btrfs_root *root,
127 u64 objectid, u64 pos,
128 u64 disk_offset, u64 disk_num_bytes,
Chris Masonc8b97812008-10-29 14:49:59 -0400129 u64 num_bytes, u64 offset, u64 ram_bytes,
130 u8 compression, u8 encryption, u16 other_encoding)
Chris Mason9f5fae22007-03-20 14:38:32 -0400131{
Chris Masondee26a92007-03-26 16:00:06 -0400132 int ret = 0;
133 struct btrfs_file_extent_item *item;
134 struct btrfs_key file_key;
Chris Mason5caf2a02007-04-02 11:20:42 -0400135 struct btrfs_path *path;
Chris Mason5f39d392007-10-15 16:14:19 -0400136 struct extent_buffer *leaf;
Chris Masondee26a92007-03-26 16:00:06 -0400137
Chris Mason5caf2a02007-04-02 11:20:42 -0400138 path = btrfs_alloc_path();
Tsutomu Itohdb5b4932011-03-23 08:14:16 +0000139 if (!path)
140 return -ENOMEM;
Chris Masondee26a92007-03-26 16:00:06 -0400141 file_key.objectid = objectid;
Chris Masonb18c6682007-04-17 13:26:50 -0400142 file_key.offset = pos;
David Sterba962a2982014-06-04 18:41:45 +0200143 file_key.type = BTRFS_EXTENT_DATA_KEY;
Chris Masondee26a92007-03-26 16:00:06 -0400144
Chris Mason5caf2a02007-04-02 11:20:42 -0400145 ret = btrfs_insert_empty_item(trans, root, path, &file_key,
Chris Masondee26a92007-03-26 16:00:06 -0400146 sizeof(*item));
Chris Mason54aa1f42007-06-22 14:16:25 -0400147 if (ret < 0)
148 goto out;
Jeff Mahoney79787ea2012-03-12 16:03:00 +0100149 BUG_ON(ret); /* Can't happen */
Chris Mason5f39d392007-10-15 16:14:19 -0400150 leaf = path->nodes[0];
151 item = btrfs_item_ptr(leaf, path->slots[0],
Chris Masondee26a92007-03-26 16:00:06 -0400152 struct btrfs_file_extent_item);
Sage Weilf2eb0a22008-05-02 14:43:14 -0400153 btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
Chris Masondb945352007-10-15 16:15:53 -0400154 btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
Sage Weilf2eb0a22008-05-02 14:43:14 -0400155 btrfs_set_file_extent_offset(leaf, item, offset);
Chris Masondb945352007-10-15 16:15:53 -0400156 btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
Chris Masonc8b97812008-10-29 14:49:59 -0400157 btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
Chris Mason5f39d392007-10-15 16:14:19 -0400158 btrfs_set_file_extent_generation(leaf, item, trans->transid);
159 btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
Chris Masonc8b97812008-10-29 14:49:59 -0400160 btrfs_set_file_extent_compression(leaf, item, compression);
161 btrfs_set_file_extent_encryption(leaf, item, encryption);
162 btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);
163
Chris Mason5f39d392007-10-15 16:14:19 -0400164 btrfs_mark_buffer_dirty(leaf);
Chris Mason54aa1f42007-06-22 14:16:25 -0400165out:
Chris Mason5caf2a02007-04-02 11:20:42 -0400166 btrfs_free_path(path);
Chris Mason54aa1f42007-06-22 14:16:25 -0400167 return ret;
Chris Mason9f5fae22007-03-20 14:38:32 -0400168}
Chris Masondee26a92007-03-26 16:00:06 -0400169
Eric Sandeen48a3b632013-04-25 20:41:01 +0000170static struct btrfs_csum_item *
171btrfs_lookup_csum(struct btrfs_trans_handle *trans,
172 struct btrfs_root *root,
173 struct btrfs_path *path,
174 u64 bytenr, int cow)
Chris Mason6567e832007-04-16 09:22:45 -0400175{
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400176 struct btrfs_fs_info *fs_info = root->fs_info;
Chris Mason6567e832007-04-16 09:22:45 -0400177 int ret;
178 struct btrfs_key file_key;
179 struct btrfs_key found_key;
180 struct btrfs_csum_item *item;
Chris Mason5f39d392007-10-15 16:14:19 -0400181 struct extent_buffer *leaf;
Chris Mason6567e832007-04-16 09:22:45 -0400182 u64 csum_offset = 0;
David Sterba223486c2020-07-02 11:27:30 +0200183 const u32 csum_size = fs_info->csum_size;
Chris Masona429e512007-04-18 16:15:28 -0400184 int csums_in_item;
Chris Mason6567e832007-04-16 09:22:45 -0400185
Chris Masond20f7042008-12-08 16:58:54 -0500186 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
187 file_key.offset = bytenr;
David Sterba962a2982014-06-04 18:41:45 +0200188 file_key.type = BTRFS_EXTENT_CSUM_KEY;
Chris Masonb18c6682007-04-17 13:26:50 -0400189 ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
Chris Mason6567e832007-04-16 09:22:45 -0400190 if (ret < 0)
191 goto fail;
Chris Mason5f39d392007-10-15 16:14:19 -0400192 leaf = path->nodes[0];
Chris Mason6567e832007-04-16 09:22:45 -0400193 if (ret > 0) {
194 ret = 1;
Chris Mason70b2bef2007-04-17 15:39:32 -0400195 if (path->slots[0] == 0)
Chris Mason6567e832007-04-16 09:22:45 -0400196 goto fail;
197 path->slots[0]--;
Chris Mason5f39d392007-10-15 16:14:19 -0400198 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
David Sterba962a2982014-06-04 18:41:45 +0200199 if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
Chris Mason6567e832007-04-16 09:22:45 -0400200 goto fail;
Chris Masond20f7042008-12-08 16:58:54 -0500201
202 csum_offset = (bytenr - found_key.offset) >>
David Sterba265fdfa2020-07-01 21:19:09 +0200203 fs_info->sectorsize_bits;
Chris Mason5f39d392007-10-15 16:14:19 -0400204 csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
Josef Bacik607d4322008-12-02 07:17:45 -0500205 csums_in_item /= csum_size;
Chris Masona429e512007-04-18 16:15:28 -0400206
Miao Xie82d130f2013-03-28 08:12:15 +0000207 if (csum_offset == csums_in_item) {
Chris Masona429e512007-04-18 16:15:28 -0400208 ret = -EFBIG;
Chris Mason6567e832007-04-16 09:22:45 -0400209 goto fail;
Miao Xie82d130f2013-03-28 08:12:15 +0000210 } else if (csum_offset > csums_in_item) {
211 goto fail;
Chris Mason6567e832007-04-16 09:22:45 -0400212 }
213 }
214 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
Chris Mason509659c2007-05-10 12:36:17 -0400215 item = (struct btrfs_csum_item *)((unsigned char *)item +
Josef Bacik607d4322008-12-02 07:17:45 -0500216 csum_offset * csum_size);
Chris Mason6567e832007-04-16 09:22:45 -0400217 return item;
218fail:
219 if (ret > 0)
Chris Masonb18c6682007-04-17 13:26:50 -0400220 ret = -ENOENT;
Chris Mason6567e832007-04-16 09:22:45 -0400221 return ERR_PTR(ret);
222}
223
Chris Masondee26a92007-03-26 16:00:06 -0400224int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
225 struct btrfs_root *root,
226 struct btrfs_path *path, u64 objectid,
Chris Mason9773a782007-03-27 11:26:26 -0400227 u64 offset, int mod)
Chris Masondee26a92007-03-26 16:00:06 -0400228{
229 int ret;
230 struct btrfs_key file_key;
231 int ins_len = mod < 0 ? -1 : 0;
232 int cow = mod != 0;
233
234 file_key.objectid = objectid;
Chris Mason70b2bef2007-04-17 15:39:32 -0400235 file_key.offset = offset;
David Sterba962a2982014-06-04 18:41:45 +0200236 file_key.type = BTRFS_EXTENT_DATA_KEY;
Chris Masondee26a92007-03-26 16:00:06 -0400237 ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
238 return ret;
239}
Chris Masonf254e522007-03-29 15:15:27 -0400240
Qu Wenruo62751932020-12-02 14:48:06 +0800241/*
242 * Find checksums for logical bytenr range [disk_bytenr, disk_bytenr + len) and
243 * store the result to @dst.
244 *
245 * Return >0 for the number of sectors we found.
246 * Return 0 if the range [disk_bytenr, disk_bytenr + sectorsize) has no csum
247 * for it. The caller may want to try the next sector until one range is hit.
248 * Return <0 for fatal error.
249 */
250static int search_csum_tree(struct btrfs_fs_info *fs_info,
251 struct btrfs_path *path, u64 disk_bytenr,
252 u64 len, u8 *dst)
253{
254 struct btrfs_csum_item *item = NULL;
255 struct btrfs_key key;
256 const u32 sectorsize = fs_info->sectorsize;
257 const u32 csum_size = fs_info->csum_size;
258 u32 itemsize;
259 int ret;
260 u64 csum_start;
261 u64 csum_len;
262
263 ASSERT(IS_ALIGNED(disk_bytenr, sectorsize) &&
264 IS_ALIGNED(len, sectorsize));
265
266 /* Check if the current csum item covers disk_bytenr */
267 if (path->nodes[0]) {
268 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
269 struct btrfs_csum_item);
270 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
271 itemsize = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
272
273 csum_start = key.offset;
274 csum_len = (itemsize / csum_size) * sectorsize;
275
276 if (in_range(disk_bytenr, csum_start, csum_len))
277 goto found;
278 }
279
280 /* Current item doesn't contain the desired range, search again */
281 btrfs_release_path(path);
282 item = btrfs_lookup_csum(NULL, fs_info->csum_root, path, disk_bytenr, 0);
283 if (IS_ERR(item)) {
284 ret = PTR_ERR(item);
285 goto out;
286 }
287 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
288 itemsize = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
289
290 csum_start = key.offset;
291 csum_len = (itemsize / csum_size) * sectorsize;
292 ASSERT(in_range(disk_bytenr, csum_start, csum_len));
293
294found:
295 ret = (min(csum_start + csum_len, disk_bytenr + len) -
296 disk_bytenr) >> fs_info->sectorsize_bits;
297 read_extent_buffer(path->nodes[0], dst, (unsigned long)item,
298 ret * csum_size);
299out:
300 if (ret == -ENOENT)
301 ret = 0;
302 return ret;
303}
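
/*
 * Example of the return convention above (illustrative numbers): with a 4K
 * sectorsize, a lookup for disk_bytenr X and len 16K where the csum item only
 * covers [X, X + 8K) copies two checksums into @dst and returns 2; the caller
 * is expected to repeat the search at X + 8K for the remaining sectors, as
 * btrfs_lookup_bio_sums() below does.
 */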
304
305/*
306 * Locate the file_offset for @disk_bytenr within a @bio.
307 *
308 * A btrfs bio represents a read range of
309 * [bi_sector << 9, bi_sector << 9 + bi_size).
310 * Knowing this, we can iterate through each bvec to locate the page belonging
311 * to @disk_bytenr and get the file offset.
312 *
313 * @inode is used to determine if the bvec page really belongs to @inode.
314 *
315 * Return 0 if we can't find the file offset.
316 * Return >0 if we find the file offset and store it to @file_offset_ret.
317 */
318static int search_file_offset_in_bio(struct bio *bio, struct inode *inode,
319 u64 disk_bytenr, u64 *file_offset_ret)
320{
321 struct bvec_iter iter;
322 struct bio_vec bvec;
323 u64 cur = bio->bi_iter.bi_sector << SECTOR_SHIFT;
324 int ret = 0;
325
326 bio_for_each_segment(bvec, bio, iter) {
327 struct page *page = bvec.bv_page;
328
329 if (cur > disk_bytenr)
330 break;
331 if (cur + bvec.bv_len <= disk_bytenr) {
332 cur += bvec.bv_len;
333 continue;
334 }
335 ASSERT(in_range(disk_bytenr, cur, bvec.bv_len));
336 if (page->mapping && page->mapping->host &&
337 page->mapping->host == inode) {
338 ret = 1;
339 *file_offset_ret = page_offset(page) + bvec.bv_offset +
340 disk_bytenr - cur;
341 break;
342 }
343 }
344 return ret;
345}
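
/*
 * Illustrative walk-through (numbers invented): for a bio starting at disk
 * bytenr 1M with two 4K bvecs, a lookup for disk bytenr 1M + 4K skips the
 * first bvec (cur advances to 1M + 4K), matches the second one, and returns
 * that bvec's page_offset() plus bv_offset as the file offset.
 */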
346
Omar Sandovale62958f2019-12-02 17:34:17 -0800347/**
Qu Wenruo62751932020-12-02 14:48:06 +0800348 * Look up the checksums for the read bio in the csum tree.
Qu Wenruo9e464582020-12-02 14:48:05 +0800349 *
Omar Sandovale62958f2019-12-02 17:34:17 -0800350 * @inode: inode that the bio is for.
Omar Sandovalfb30f472020-04-16 14:46:16 -0700351 * @bio: bio to look up.
Omar Sandovalfb30f472020-04-16 14:46:16 -0700352 * @dst: Buffer of size nblocks * btrfs_super_csum_size() used to return
353 * checksum (nblocks = bio->bi_iter.bi_size / fs_info->sectorsize). If
354 * NULL, the checksum buffer is allocated and returned in
355 * btrfs_io_bio(bio)->csum instead.
Omar Sandovale62958f2019-12-02 17:34:17 -0800356 *
357 * Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise.
358 */
Qu Wenruo62751932020-12-02 14:48:06 +0800359blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u8 *dst)
Chris Mason61b49442008-07-31 15:42:53 -0400360{
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400361 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
Miao Xiefacc8a222013-07-25 19:22:34 +0800362 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
363 struct btrfs_path *path;
Qu Wenruo62751932020-12-02 14:48:06 +0800364 const u32 sectorsize = fs_info->sectorsize;
David Sterba223486c2020-07-02 11:27:30 +0200365 const u32 csum_size = fs_info->csum_size;
Qu Wenruo62751932020-12-02 14:48:06 +0800366 u32 orig_len = bio->bi_iter.bi_size;
367 u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
368 u64 cur_disk_bytenr;
369 u8 *csum;
370 const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
371 int count = 0;
Chris Mason61b49442008-07-31 15:42:53 -0400372
Josef Bacik42437a62020-10-16 11:29:18 -0400373 if (!fs_info->csum_root || (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
Josef Bacik334c16d2020-10-16 11:29:14 -0400374 return BLK_STS_OK;
375
Qu Wenruo9e464582020-12-02 14:48:05 +0800376 /*
377 * This function is only called for read bios.
378 *
379 * This means two things:
380 * - All our csums should only be in csum tree
381 * No ordered extent csums, as ordered extents are only for the write
382 * path.
Qu Wenruo62751932020-12-02 14:48:06 +0800383 * - No need to bother with any other info from the bvecs
384 * Since we're looking up csums, the only important info is the
385 * disk_bytenr and the length, which can be extracted from bi_iter
386 * directly.
Qu Wenruo9e464582020-12-02 14:48:05 +0800387 */
388 ASSERT(bio_op(bio) == REQ_OP_READ);
Chris Mason61b49442008-07-31 15:42:53 -0400389 path = btrfs_alloc_path();
Tsutomu Itohc2db1072011-03-01 06:48:31 +0000390 if (!path)
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200391 return BLK_STS_RESOURCE;
Miao Xiefacc8a222013-07-25 19:22:34 +0800392
Miao Xiefacc8a222013-07-25 19:22:34 +0800393 if (!dst) {
Omar Sandovalfb30f472020-04-16 14:46:16 -0700394 struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
395
Miao Xiefacc8a222013-07-25 19:22:34 +0800396 if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
David Sterba31feccc2018-11-22 17:16:46 +0100397 btrfs_bio->csum = kmalloc_array(nblocks, csum_size,
398 GFP_NOFS);
399 if (!btrfs_bio->csum) {
Miao Xiefacc8a222013-07-25 19:22:34 +0800400 btrfs_free_path(path);
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200401 return BLK_STS_RESOURCE;
Miao Xiefacc8a222013-07-25 19:22:34 +0800402 }
Miao Xiefacc8a222013-07-25 19:22:34 +0800403 } else {
404 btrfs_bio->csum = btrfs_bio->csum_inline;
405 }
406 csum = btrfs_bio->csum;
407 } else {
Johannes Thumshirn10fe6ca2019-05-22 10:19:02 +0200408 csum = dst;
Miao Xiefacc8a222013-07-25 19:22:34 +0800409 }
410
Qu Wenruo35478d02020-11-13 20:51:41 +0800411 /*
412 * If the requested number of sectors is larger than one leaf can contain,
413 * kick off readahead for the csum tree.
414 */
415 if (nblocks > fs_info->csums_per_leaf)
David Sterbae4058b52015-11-27 16:31:35 +0100416 path->reada = READA_FORWARD;
Chris Mason61b49442008-07-31 15:42:53 -0400417
Chris Mason2cf85722011-07-26 15:35:09 -0400418 /*
419 * the free space stuff is only read when it hasn't been
420 * updated in the current transaction. So, we can safely
421 * read from the commit root and sidestep a nasty deadlock
422 * between reading the free space cache and updating the csum tree.
423 */
Nikolay Borisov70ddc552017-02-20 13:50:35 +0200424 if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
Chris Mason2cf85722011-07-26 15:35:09 -0400425 path->search_commit_root = 1;
Josef Bacikddf23b32011-09-11 10:52:24 -0400426 path->skip_locking = 1;
427 }
Chris Mason2cf85722011-07-26 15:35:09 -0400428
Qu Wenruo62751932020-12-02 14:48:06 +0800429 for (cur_disk_bytenr = orig_disk_bytenr;
430 cur_disk_bytenr < orig_disk_bytenr + orig_len;
431 cur_disk_bytenr += (count * sectorsize)) {
432 u64 search_len = orig_disk_bytenr + orig_len - cur_disk_bytenr;
433 unsigned int sector_offset;
434 u8 *csum_dst;
Chandan Rajendrac40a3d32016-01-21 15:55:54 +0530435
Chris Mason61b49442008-07-31 15:42:53 -0400436 /*
Qu Wenruo62751932020-12-02 14:48:06 +0800437 * Although both cur_disk_bytenr and orig_disk_bytenr is u64,
438 * we're calculating the offset to the bio start.
439 *
440 * Bio size is limited to UINT_MAX, thus unsigned int is large
441 * enough to contain the raw result, not to mention the right
442 * shifted result.
Chris Mason61b49442008-07-31 15:42:53 -0400443 */
Qu Wenruo62751932020-12-02 14:48:06 +0800444 ASSERT(cur_disk_bytenr - orig_disk_bytenr < UINT_MAX);
445 sector_offset = (cur_disk_bytenr - orig_disk_bytenr) >>
446 fs_info->sectorsize_bits;
447 csum_dst = csum + sector_offset * csum_size;
448
449 count = search_csum_tree(fs_info, path, cur_disk_bytenr,
450 search_len, csum_dst);
451 if (count <= 0) {
452 /*
453 * Either we hit a critical error or we didn't find
454 * the csum.
455 * Either way, we put zero into the csums dst, and skip
456 * to the next sector.
457 */
458 memset(csum_dst, 0, csum_size);
459 count = 1;
460
461 /*
462 * For data reloc inode, we need to mark the range
463 * NODATASUM so that balance won't report false csum
464 * error.
465 */
466 if (BTRFS_I(inode)->root->root_key.objectid ==
467 BTRFS_DATA_RELOC_TREE_OBJECTID) {
468 u64 file_offset;
469 int ret;
470
471 ret = search_file_offset_in_bio(bio, inode,
472 cur_disk_bytenr, &file_offset);
473 if (ret)
474 set_extent_bits(io_tree, file_offset,
475 file_offset + sectorsize - 1,
476 EXTENT_NODATASUM);
477 } else {
478 btrfs_warn_rl(fs_info,
479 "csum hole found for disk bytenr range [%llu, %llu)",
480 cur_disk_bytenr, cur_disk_bytenr + sectorsize);
481 }
Miao Xiee4100d92013-04-05 07:20:56 +0000482 }
Chris Mason61b49442008-07-31 15:42:53 -0400483 }
Chris Mason389f2392016-03-21 06:59:09 -0700484
Chris Mason61b49442008-07-31 15:42:53 -0400485 btrfs_free_path(path);
Omar Sandovale62958f2019-12-02 17:34:17 -0800486 return BLK_STS_OK;
Josef Bacik4b46fce2010-05-23 11:00:55 -0400487}
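
/*
 * Hypothetical read-path caller sketch; only btrfs_lookup_bio_sums() and
 * btrfs_io_bio() are real interfaces here, the wrapper is invented for
 * illustration.  Passing a NULL @dst asks the function to stash the
 * checksums in btrfs_io_bio(bio)->csum for later verification.
 */
static inline blk_status_t example_prepare_read_csums(struct inode *inode,
						      struct bio *bio)
{
	blk_status_t status;

	status = btrfs_lookup_bio_sums(inode, bio, NULL);
	if (status != BLK_STS_OK)
		return status;

	/* btrfs_io_bio(bio)->csum now holds one checksum per sector. */
	return BLK_STS_OK;
}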
488
Yan Zheng17d217f2008-12-12 10:03:38 -0500489int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
Arne Jansena2de7332011-03-08 14:14:00 +0100490 struct list_head *list, int search_commit)
Yan Zheng17d217f2008-12-12 10:03:38 -0500491{
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400492 struct btrfs_fs_info *fs_info = root->fs_info;
Yan Zheng17d217f2008-12-12 10:03:38 -0500493 struct btrfs_key key;
494 struct btrfs_path *path;
495 struct extent_buffer *leaf;
496 struct btrfs_ordered_sum *sums;
Yan Zheng17d217f2008-12-12 10:03:38 -0500497 struct btrfs_csum_item *item;
Mark Fasheh0678b612011-08-05 15:46:16 -0700498 LIST_HEAD(tmplist);
Yan Zheng17d217f2008-12-12 10:03:38 -0500499 unsigned long offset;
500 int ret;
501 size_t size;
502 u64 csum_end;
David Sterba223486c2020-07-02 11:27:30 +0200503 const u32 csum_size = fs_info->csum_size;
Yan Zheng17d217f2008-12-12 10:03:38 -0500504
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400505 ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
506 IS_ALIGNED(end + 1, fs_info->sectorsize));
Josef Bacik4277a9c2013-10-15 09:36:40 -0400507
Yan Zheng17d217f2008-12-12 10:03:38 -0500508 path = btrfs_alloc_path();
Mark Fashehd8926bb2011-07-13 10:38:47 -0700509 if (!path)
510 return -ENOMEM;
Yan Zheng17d217f2008-12-12 10:03:38 -0500511
Arne Jansena2de7332011-03-08 14:14:00 +0100512 if (search_commit) {
513 path->skip_locking = 1;
David Sterbae4058b52015-11-27 16:31:35 +0100514 path->reada = READA_FORWARD;
Arne Jansena2de7332011-03-08 14:14:00 +0100515 path->search_commit_root = 1;
516 }
517
Yan Zheng17d217f2008-12-12 10:03:38 -0500518 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
519 key.offset = start;
520 key.type = BTRFS_EXTENT_CSUM_KEY;
521
Yan Zheng07d400a2009-01-06 11:42:00 -0500522 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
Yan Zheng17d217f2008-12-12 10:03:38 -0500523 if (ret < 0)
524 goto fail;
525 if (ret > 0 && path->slots[0] > 0) {
526 leaf = path->nodes[0];
527 btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
528 if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
529 key.type == BTRFS_EXTENT_CSUM_KEY) {
David Sterba265fdfa2020-07-01 21:19:09 +0200530 offset = (start - key.offset) >> fs_info->sectorsize_bits;
Yan Zheng17d217f2008-12-12 10:03:38 -0500531 if (offset * csum_size <
532 btrfs_item_size_nr(leaf, path->slots[0] - 1))
533 path->slots[0]--;
534 }
535 }
536
537 while (start <= end) {
538 leaf = path->nodes[0];
539 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
Yan Zheng07d400a2009-01-06 11:42:00 -0500540 ret = btrfs_next_leaf(root, path);
Yan Zheng17d217f2008-12-12 10:03:38 -0500541 if (ret < 0)
542 goto fail;
543 if (ret > 0)
544 break;
545 leaf = path->nodes[0];
546 }
547
548 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
549 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
Zhi Yong Wu628c8282013-03-18 09:18:09 +0000550 key.type != BTRFS_EXTENT_CSUM_KEY ||
551 key.offset > end)
Yan Zheng17d217f2008-12-12 10:03:38 -0500552 break;
553
554 if (key.offset > start)
555 start = key.offset;
556
557 size = btrfs_item_size_nr(leaf, path->slots[0]);
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400558 csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
Yan Zheng87b29b22008-12-17 10:21:48 -0500559 if (csum_end <= start) {
560 path->slots[0]++;
561 continue;
562 }
Yan Zheng17d217f2008-12-12 10:03:38 -0500563
Yan Zheng07d400a2009-01-06 11:42:00 -0500564 csum_end = min(csum_end, end + 1);
Yan Zheng17d217f2008-12-12 10:03:38 -0500565 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
566 struct btrfs_csum_item);
Yan Zheng07d400a2009-01-06 11:42:00 -0500567 while (start < csum_end) {
568 size = min_t(size_t, csum_end - start,
Johannes Thumshirn1e25a2e2019-05-22 10:19:01 +0200569 max_ordered_sum_bytes(fs_info, csum_size));
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400570 sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
Miao Xief51a4a12013-06-19 10:36:09 +0800571 GFP_NOFS);
Mark Fasheh0678b612011-08-05 15:46:16 -0700572 if (!sums) {
573 ret = -ENOMEM;
574 goto fail;
575 }
Yan Zheng17d217f2008-12-12 10:03:38 -0500576
Yan Zheng07d400a2009-01-06 11:42:00 -0500577 sums->bytenr = start;
Miao Xief51a4a12013-06-19 10:36:09 +0800578 sums->len = (int)size;
Yan Zheng07d400a2009-01-06 11:42:00 -0500579
David Sterba265fdfa2020-07-01 21:19:09 +0200580 offset = (start - key.offset) >> fs_info->sectorsize_bits;
Yan Zheng07d400a2009-01-06 11:42:00 -0500581 offset *= csum_size;
David Sterba265fdfa2020-07-01 21:19:09 +0200582 size >>= fs_info->sectorsize_bits;
Yan Zheng07d400a2009-01-06 11:42:00 -0500583
Miao Xief51a4a12013-06-19 10:36:09 +0800584 read_extent_buffer(path->nodes[0],
585 sums->sums,
586 ((unsigned long)item) + offset,
587 csum_size * size);
Yan Zheng07d400a2009-01-06 11:42:00 -0500588
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400589 start += fs_info->sectorsize * size;
Mark Fasheh0678b612011-08-05 15:46:16 -0700590 list_add_tail(&sums->list, &tmplist);
Yan Zheng17d217f2008-12-12 10:03:38 -0500591 }
Yan Zheng17d217f2008-12-12 10:03:38 -0500592 path->slots[0]++;
593 }
594 ret = 0;
595fail:
Mark Fasheh0678b612011-08-05 15:46:16 -0700596 while (ret < 0 && !list_empty(&tmplist)) {
Chris Mason6e5aafb2014-11-04 06:59:04 -0800597 sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
Mark Fasheh0678b612011-08-05 15:46:16 -0700598 list_del(&sums->list);
599 kfree(sums);
600 }
601 list_splice_tail(&tmplist, list);
602
Yan Zheng17d217f2008-12-12 10:03:38 -0500603 btrfs_free_path(path);
604 return ret;
605}
606
Nikolay Borisov51d470a2019-04-22 16:07:31 +0300607/*
608 * btrfs_csum_one_bio - Calculates checksums of the data contained inside a bio
609 * @inode: Owner of the data inside the bio
610 * @bio: Contains the data to be checksummed
611 * @file_start: offset in file this bio begins to describe
612 * @contig: Boolean. If true/1 it means all bio vecs in this bio are
613 * contiguous and they begin at @file_start in the file. False/0
614 * means this bio can contain potentially discontiguous bio vecs,
615 * so the logical offset of each should be calculated separately.
616 */
Nikolay Borisovbd242a02020-06-03 08:55:07 +0300617blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
Jeff Mahoney2ff7e612016-06-22 18:54:24 -0400618 u64 file_start, int contig)
Chris Masone0156402008-04-16 11:15:20 -0400619{
Nikolay Borisovc3504372020-06-03 08:55:03 +0300620 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Johannes Thumshirnd5178572019-06-03 16:58:57 +0200621 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
Chris Masone6dcd2d2008-07-17 12:53:50 -0400622 struct btrfs_ordered_sum *sums;
Christoph Hellwig6cd7ce42016-11-25 09:07:49 +0100623 struct btrfs_ordered_extent *ordered = NULL;
Chris Masone0156402008-04-16 11:15:20 -0400624 char *data;
Liu Bo17347ce2017-05-15 15:33:27 -0700625 struct bvec_iter iter;
626 struct bio_vec bvec;
Miao Xief51a4a12013-06-19 10:36:09 +0800627 int index;
Chandan Rajendrac40a3d32016-01-21 15:55:54 +0530628 int nr_sectors;
Chris Mason3edf7d32008-07-18 06:17:13 -0400629 unsigned long total_bytes = 0;
630 unsigned long this_sum_bytes = 0;
Liu Bo17347ce2017-05-15 15:33:27 -0700631 int i;
Chris Mason3edf7d32008-07-18 06:17:13 -0400632 u64 offset;
Nikolay Borisova3d46ae2019-04-01 11:29:58 +0300633 unsigned nofs_flag;
Chris Masone0156402008-04-16 11:15:20 -0400634
Nikolay Borisova3d46ae2019-04-01 11:29:58 +0300635 nofs_flag = memalloc_nofs_save();
636 sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
637 GFP_KERNEL);
638 memalloc_nofs_restore(nofs_flag);
639
Chris Masone0156402008-04-16 11:15:20 -0400640 if (!sums)
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +0200641 return BLK_STS_RESOURCE;
Chris Mason3edf7d32008-07-18 06:17:13 -0400642
Kent Overstreet4f024f32013-10-11 15:44:27 -0700643 sums->len = bio->bi_iter.bi_size;
Chris Masone6dcd2d2008-07-17 12:53:50 -0400644 INIT_LIST_HEAD(&sums->list);
Chris Masond20f7042008-12-08 16:58:54 -0500645
646 if (contig)
647 offset = file_start;
648 else
Christoph Hellwig6cd7ce42016-11-25 09:07:49 +0100649 offset = 0; /* shut up gcc */
Chris Masond20f7042008-12-08 16:58:54 -0500650
David Sterba1201b582020-11-26 15:41:27 +0100651 sums->bytenr = bio->bi_iter.bi_sector << 9;
Miao Xief51a4a12013-06-19 10:36:09 +0800652 index = 0;
Chris Masone0156402008-04-16 11:15:20 -0400653
Johannes Thumshirnd5178572019-06-03 16:58:57 +0200654 shash->tfm = fs_info->csum_shash;
655
Liu Bo17347ce2017-05-15 15:33:27 -0700656 bio_for_each_segment(bvec, bio, iter) {
Chris Masond20f7042008-12-08 16:58:54 -0500657 if (!contig)
Liu Bo17347ce2017-05-15 15:33:27 -0700658 offset = page_offset(bvec.bv_page) + bvec.bv_offset;
Chris Masond20f7042008-12-08 16:58:54 -0500659
Christoph Hellwig6cd7ce42016-11-25 09:07:49 +0100660 if (!ordered) {
661 ordered = btrfs_lookup_ordered_extent(inode, offset);
662 BUG_ON(!ordered); /* Logic error */
663 }
664
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400665 nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
Liu Bo17347ce2017-05-15 15:33:27 -0700666 bvec.bv_len + fs_info->sectorsize
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400667 - 1);
Chris Mason3edf7d32008-07-18 06:17:13 -0400668
Chandan Rajendrac40a3d32016-01-21 15:55:54 +0530669 for (i = 0; i < nr_sectors; i++) {
Omar Sandovalbffe6332019-12-02 17:34:19 -0800670 if (offset >= ordered->file_offset + ordered->num_bytes ||
671 offset < ordered->file_offset) {
Chandan Rajendrac40a3d32016-01-21 15:55:54 +0530672 unsigned long bytes_left;
673
Chandan Rajendrac40a3d32016-01-21 15:55:54 +0530674 sums->len = this_sum_bytes;
675 this_sum_bytes = 0;
Nikolay Borisovf9756262019-04-10 16:16:11 +0300676 btrfs_add_ordered_sum(ordered, sums);
Chandan Rajendrac40a3d32016-01-21 15:55:54 +0530677 btrfs_put_ordered_extent(ordered);
678
679 bytes_left = bio->bi_iter.bi_size - total_bytes;
680
Nikolay Borisova3d46ae2019-04-01 11:29:58 +0300681 nofs_flag = memalloc_nofs_save();
682 sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
683 bytes_left), GFP_KERNEL);
684 memalloc_nofs_restore(nofs_flag);
Chandan Rajendrac40a3d32016-01-21 15:55:54 +0530685 BUG_ON(!sums); /* -ENOMEM */
686 sums->len = bytes_left;
687 ordered = btrfs_lookup_ordered_extent(inode,
688 offset);
689 ASSERT(ordered); /* Logic error */
David Sterba1201b582020-11-26 15:41:27 +0100690 sums->bytenr = (bio->bi_iter.bi_sector << 9)
Chandan Rajendrac40a3d32016-01-21 15:55:54 +0530691 + total_bytes;
692 index = 0;
Chandan Rajendrac40a3d32016-01-21 15:55:54 +0530693 }
694
Johannes Thumshirn443c8e22019-03-07 17:14:00 +0100695 data = kmap_atomic(bvec.bv_page);
Eric Biggersfd080012020-04-30 23:51:59 -0700696 crypto_shash_digest(shash, data + bvec.bv_offset
Johannes Thumshirnd5178572019-06-03 16:58:57 +0200697 + (i * fs_info->sectorsize),
Eric Biggersfd080012020-04-30 23:51:59 -0700698 fs_info->sectorsize,
699 sums->sums + index);
Johannes Thumshirn443c8e22019-03-07 17:14:00 +0100700 kunmap_atomic(data);
David Sterba713cebf2020-06-30 18:04:02 +0200701 index += fs_info->csum_size;
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400702 offset += fs_info->sectorsize;
703 this_sum_bytes += fs_info->sectorsize;
704 total_bytes += fs_info->sectorsize;
Chris Mason3edf7d32008-07-18 06:17:13 -0400705 }
706
Chris Masone0156402008-04-16 11:15:20 -0400707 }
Chris Masoned98b562008-07-22 23:06:42 -0400708 this_sum_bytes = 0;
Nikolay Borisovf9756262019-04-10 16:16:11 +0300709 btrfs_add_ordered_sum(ordered, sums);
Chris Mason3edf7d32008-07-18 06:17:13 -0400710 btrfs_put_ordered_extent(ordered);
Chris Masone0156402008-04-16 11:15:20 -0400711 return 0;
712}
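
/*
 * Hypothetical write-path caller sketch; btrfs_csum_one_bio() is the real
 * function above, the wrapper is invented for illustration.  A bio whose
 * bvecs are contiguous and start at file offset @file_start is checksummed
 * with contig == 1, attaching the sums to the matching ordered extent(s)
 * before the bio is submitted.
 */
static inline blk_status_t example_csum_write_bio(struct btrfs_inode *inode,
						  struct bio *bio,
						  u64 file_start)
{
	return btrfs_csum_one_bio(inode, bio, file_start, 1);
}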
713
Chris Mason459931e2008-12-10 09:10:46 -0500714/*
715 * Helper function for csum removal; this expects the
716 * key to describe the csum pointed to by the path, and it expects
717 * the csum to overlap the range [bytenr, bytenr + len)
718 *
719 * The csum should not be entirely contained in the range and the
720 * range should not be entirely contained in the csum.
721 *
722 * This calls btrfs_truncate_item with the correct args based on the
723 * overlap, and fixes up the key as required.
724 */
Jeff Mahoney2ff7e612016-06-22 18:54:24 -0400725static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
Jeff Mahoney143bede2012-03-01 14:56:26 +0100726 struct btrfs_path *path,
727 struct btrfs_key *key,
728 u64 bytenr, u64 len)
Chris Mason459931e2008-12-10 09:10:46 -0500729{
730 struct extent_buffer *leaf;
David Sterba223486c2020-07-02 11:27:30 +0200731 const u32 csum_size = fs_info->csum_size;
Chris Mason459931e2008-12-10 09:10:46 -0500732 u64 csum_end;
733 u64 end_byte = bytenr + len;
David Sterba265fdfa2020-07-01 21:19:09 +0200734 u32 blocksize_bits = fs_info->sectorsize_bits;
Chris Mason459931e2008-12-10 09:10:46 -0500735
736 leaf = path->nodes[0];
737 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
David Sterba265fdfa2020-07-01 21:19:09 +0200738 csum_end <<= blocksize_bits;
Chris Mason459931e2008-12-10 09:10:46 -0500739 csum_end += key->offset;
740
741 if (key->offset < bytenr && csum_end <= end_byte) {
742 /*
743 * [ bytenr - len ]
744 * [ ]
745 * [csum ]
746 * A simple truncate off the end of the item
747 */
748 u32 new_size = (bytenr - key->offset) >> blocksize_bits;
749 new_size *= csum_size;
David Sterba78ac4f92019-03-20 14:49:12 +0100750 btrfs_truncate_item(path, new_size, 1);
Chris Mason459931e2008-12-10 09:10:46 -0500751 } else if (key->offset >= bytenr && csum_end > end_byte &&
752 end_byte > key->offset) {
753 /*
754 * [ bytenr - len ]
755 * [ ]
756 * [csum ]
757 * we need to truncate from the beginning of the csum
758 */
759 u32 new_size = (csum_end - end_byte) >> blocksize_bits;
760 new_size *= csum_size;
761
David Sterba78ac4f92019-03-20 14:49:12 +0100762 btrfs_truncate_item(path, new_size, 0);
Chris Mason459931e2008-12-10 09:10:46 -0500763
764 key->offset = end_byte;
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400765 btrfs_set_item_key_safe(fs_info, path, key);
Chris Mason459931e2008-12-10 09:10:46 -0500766 } else {
767 BUG();
768 }
Chris Mason459931e2008-12-10 09:10:46 -0500769}
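
/*
 * Numeric illustration of the two cases above (invented values, 4K
 * sectorsize, 4 byte csums): a csum item at key->offset 0 covering 64K holds
 * 16 csums.  Deleting [48K, 64K) truncates it from the end down to 12 csums;
 * deleting [0, 16K) instead truncates 4 csums off the front and moves the
 * item's key to offset 16K.
 */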
770
771/*
772 * deletes the csum items from the csum tree for a given
773 * range of bytes.
774 */
775int btrfs_del_csums(struct btrfs_trans_handle *trans,
Filipe Manana40e046a2019-12-05 16:58:30 +0000776 struct btrfs_root *root, u64 bytenr, u64 len)
Chris Mason459931e2008-12-10 09:10:46 -0500777{
Filipe Manana40e046a2019-12-05 16:58:30 +0000778 struct btrfs_fs_info *fs_info = trans->fs_info;
Chris Mason459931e2008-12-10 09:10:46 -0500779 struct btrfs_path *path;
780 struct btrfs_key key;
781 u64 end_byte = bytenr + len;
782 u64 csum_end;
783 struct extent_buffer *leaf;
784 int ret;
David Sterba223486c2020-07-02 11:27:30 +0200785 const u32 csum_size = fs_info->csum_size;
David Sterba265fdfa2020-07-01 21:19:09 +0200786 u32 blocksize_bits = fs_info->sectorsize_bits;
Chris Mason459931e2008-12-10 09:10:46 -0500787
Filipe Manana40e046a2019-12-05 16:58:30 +0000788 ASSERT(root == fs_info->csum_root ||
789 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);
790
Chris Mason459931e2008-12-10 09:10:46 -0500791 path = btrfs_alloc_path();
liubo2a29edc2011-01-26 06:22:08 +0000792 if (!path)
793 return -ENOMEM;
Chris Mason459931e2008-12-10 09:10:46 -0500794
Chris Masond3977122009-01-05 21:25:51 -0500795 while (1) {
Chris Mason459931e2008-12-10 09:10:46 -0500796 key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
797 key.offset = end_byte - 1;
798 key.type = BTRFS_EXTENT_CSUM_KEY;
799
800 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
801 if (ret > 0) {
802 if (path->slots[0] == 0)
Tsutomu Itoh65a246c2011-05-19 04:37:44 +0000803 break;
Chris Mason459931e2008-12-10 09:10:46 -0500804 path->slots[0]--;
Josef Bacikad0397a2011-01-28 18:44:44 +0000805 } else if (ret < 0) {
Tsutomu Itoh65a246c2011-05-19 04:37:44 +0000806 break;
Chris Mason459931e2008-12-10 09:10:46 -0500807 }
Josef Bacikad0397a2011-01-28 18:44:44 +0000808
Chris Mason459931e2008-12-10 09:10:46 -0500809 leaf = path->nodes[0];
810 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
811
812 if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
813 key.type != BTRFS_EXTENT_CSUM_KEY) {
814 break;
815 }
816
817 if (key.offset >= end_byte)
818 break;
819
820 csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
821 csum_end <<= blocksize_bits;
822 csum_end += key.offset;
823
824 /* this csum ends before we start, we're done */
825 if (csum_end <= bytenr)
826 break;
827
828 /* delete the entire item, it is inside our range */
829 if (key.offset >= bytenr && csum_end <= end_byte) {
Filipe Manana6f546212017-01-28 01:47:56 +0000830 int del_nr = 1;
831
832 /*
833 * Check how many csum items preceding this one in this
834 * leaf correspond to our range and then delete them all
835 * at once.
836 */
837 if (key.offset > bytenr && path->slots[0] > 0) {
838 int slot = path->slots[0] - 1;
839
840 while (slot >= 0) {
841 struct btrfs_key pk;
842
843 btrfs_item_key_to_cpu(leaf, &pk, slot);
844 if (pk.offset < bytenr ||
845 pk.type != BTRFS_EXTENT_CSUM_KEY ||
846 pk.objectid !=
847 BTRFS_EXTENT_CSUM_OBJECTID)
848 break;
849 path->slots[0] = slot;
850 del_nr++;
851 key.offset = pk.offset;
852 slot--;
853 }
854 }
855 ret = btrfs_del_items(trans, root, path,
856 path->slots[0], del_nr);
Tsutomu Itoh65a246c2011-05-19 04:37:44 +0000857 if (ret)
858 goto out;
Chris Masondcbdd4d2008-12-16 13:51:01 -0500859 if (key.offset == bytenr)
860 break;
Chris Mason459931e2008-12-10 09:10:46 -0500861 } else if (key.offset < bytenr && csum_end > end_byte) {
862 unsigned long offset;
863 unsigned long shift_len;
864 unsigned long item_offset;
865 /*
866 * [ bytenr - len ]
867 * [csum ]
868 *
869 * Our bytes are in the middle of the csum,
870 * we need to split this item and insert a new one.
871 *
872 * But we can't drop the path because the
873 * csum could change, get removed, extended etc.
874 *
875 * The trick here is the max size of a csum item leaves
876 * enough room in the tree block for a single
877 * item header. So, we split the item in place,
878 * adding a new header pointing to the existing
879 * bytes. Then we loop around again and we have
880 * a nicely formed csum item that we can neatly
881 * truncate.
882 */
883 offset = (bytenr - key.offset) >> blocksize_bits;
884 offset *= csum_size;
885
886 shift_len = (len >> blocksize_bits) * csum_size;
887
888 item_offset = btrfs_item_ptr_offset(leaf,
889 path->slots[0]);
890
David Sterbab159fa22016-11-08 18:09:03 +0100891 memzero_extent_buffer(leaf, item_offset + offset,
Chris Mason459931e2008-12-10 09:10:46 -0500892 shift_len);
893 key.offset = bytenr;
894
895 /*
896 * btrfs_split_item returns -EAGAIN when the
897 * item changed size or key
898 */
899 ret = btrfs_split_item(trans, root, path, &key, offset);
Jeff Mahoney79787ea2012-03-12 16:03:00 +0100900 if (ret && ret != -EAGAIN) {
Jeff Mahoney66642832016-06-10 18:19:25 -0400901 btrfs_abort_transaction(trans, ret);
Jeff Mahoney79787ea2012-03-12 16:03:00 +0100902 goto out;
903 }
Chris Mason459931e2008-12-10 09:10:46 -0500904
905 key.offset = end_byte - 1;
906 } else {
Jeff Mahoney2ff7e612016-06-22 18:54:24 -0400907 truncate_one_csum(fs_info, path, &key, bytenr, len);
Chris Masondcbdd4d2008-12-16 13:51:01 -0500908 if (key.offset < bytenr)
909 break;
Chris Mason459931e2008-12-10 09:10:46 -0500910 }
David Sterbab3b4aa72011-04-21 01:20:15 +0200911 btrfs_release_path(path);
Chris Mason459931e2008-12-10 09:10:46 -0500912 }
Tsutomu Itoh65a246c2011-05-19 04:37:44 +0000913 ret = 0;
Chris Mason459931e2008-12-10 09:10:46 -0500914out:
915 btrfs_free_path(path);
Tsutomu Itoh65a246c2011-05-19 04:37:44 +0000916 return ret;
Chris Mason459931e2008-12-10 09:10:46 -0500917}
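
/*
 * Illustrative middle-split case for btrfs_del_csums() (invented numbers):
 * deleting [32K, 48K) from a csum item covering [0, 64K) zeroes the csums for
 * that window, splits the item so the second half starts at 32K, and a later
 * pass truncates that half down to [48K, 64K), leaving items that cover
 * [0, 32K) and [48K, 64K).
 */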
918
Chris Mason065631f2008-02-20 12:07:25 -0500919int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
Chris Masond20f7042008-12-08 16:58:54 -0500920 struct btrfs_root *root,
Chris Masone6dcd2d2008-07-17 12:53:50 -0400921 struct btrfs_ordered_sum *sums)
Chris Masonf254e522007-03-29 15:15:27 -0400922{
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400923 struct btrfs_fs_info *fs_info = root->fs_info;
Chris Masonf254e522007-03-29 15:15:27 -0400924 struct btrfs_key file_key;
Chris Mason6567e832007-04-16 09:22:45 -0400925 struct btrfs_key found_key;
Chris Mason5caf2a02007-04-02 11:20:42 -0400926 struct btrfs_path *path;
Chris Masonf254e522007-03-29 15:15:27 -0400927 struct btrfs_csum_item *item;
Chris Mason065631f2008-02-20 12:07:25 -0500928 struct btrfs_csum_item *item_end;
Chris Masonff79f812007-10-15 16:22:25 -0400929 struct extent_buffer *leaf = NULL;
Miao Xief51a4a12013-06-19 10:36:09 +0800930 u64 next_offset;
931 u64 total_bytes = 0;
Chris Mason6567e832007-04-16 09:22:45 -0400932 u64 csum_offset;
Miao Xief51a4a12013-06-19 10:36:09 +0800933 u64 bytenr;
Chris Masonf578d4b2007-10-25 15:42:56 -0400934 u32 nritems;
935 u32 ins_size;
Miao Xief51a4a12013-06-19 10:36:09 +0800936 int index = 0;
937 int found_next;
938 int ret;
David Sterba223486c2020-07-02 11:27:30 +0200939 const u32 csum_size = fs_info->csum_size;
Chris Mason6e92f5e2008-02-20 12:07:25 -0500940
Chris Mason5caf2a02007-04-02 11:20:42 -0400941 path = btrfs_alloc_path();
Mark Fashehd8926bb2011-07-13 10:38:47 -0700942 if (!path)
943 return -ENOMEM;
Chris Mason065631f2008-02-20 12:07:25 -0500944again:
945 next_offset = (u64)-1;
946 found_next = 0;
Miao Xief51a4a12013-06-19 10:36:09 +0800947 bytenr = sums->bytenr + total_bytes;
Chris Masond20f7042008-12-08 16:58:54 -0500948 file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
Miao Xief51a4a12013-06-19 10:36:09 +0800949 file_key.offset = bytenr;
David Sterba962a2982014-06-04 18:41:45 +0200950 file_key.type = BTRFS_EXTENT_CSUM_KEY;
Chris Masona429e512007-04-18 16:15:28 -0400951
Miao Xief51a4a12013-06-19 10:36:09 +0800952 item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
Chris Masonff79f812007-10-15 16:22:25 -0400953 if (!IS_ERR(item)) {
Chris Mason639cb582008-08-28 06:15:25 -0400954 ret = 0;
Miao Xief51a4a12013-06-19 10:36:09 +0800955 leaf = path->nodes[0];
956 item_end = btrfs_item_ptr(leaf, path->slots[0],
957 struct btrfs_csum_item);
958 item_end = (struct btrfs_csum_item *)((char *)item_end +
959 btrfs_item_size_nr(leaf, path->slots[0]));
Chris Masona429e512007-04-18 16:15:28 -0400960 goto found;
Chris Masonff79f812007-10-15 16:22:25 -0400961 }
Chris Masona429e512007-04-18 16:15:28 -0400962 ret = PTR_ERR(item);
Yan, Zheng4a500fd2010-05-16 10:49:59 -0400963 if (ret != -EFBIG && ret != -ENOENT)
Filipe Manana918cdf42020-05-18 12:15:18 +0100964 goto out;
Yan, Zheng4a500fd2010-05-16 10:49:59 -0400965
Chris Masona429e512007-04-18 16:15:28 -0400966 if (ret == -EFBIG) {
967 u32 item_size;
968 /* we found one, but it isn't big enough yet */
Chris Mason5f39d392007-10-15 16:14:19 -0400969 leaf = path->nodes[0];
970 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
Josef Bacik607d4322008-12-02 07:17:45 -0500971 if ((item_size / csum_size) >=
Jeff Mahoney0b246af2016-06-22 18:54:23 -0400972 MAX_CSUM_ITEMS(fs_info, csum_size)) {
Chris Masona429e512007-04-18 16:15:28 -0400973 /* already at max size, make a new one */
974 goto insert;
975 }
976 } else {
Chris Masonf578d4b2007-10-25 15:42:56 -0400977 int slot = path->slots[0] + 1;
Chris Masona429e512007-04-18 16:15:28 -0400978 /* we didn't find a csum item, insert one */
Chris Masonf578d4b2007-10-25 15:42:56 -0400979 nritems = btrfs_header_nritems(path->nodes[0]);
Filipe Manana35045bf2014-04-09 14:38:34 +0100980 if (!nritems || (path->slots[0] >= nritems - 1)) {
Chris Masonf578d4b2007-10-25 15:42:56 -0400981 ret = btrfs_next_leaf(root, path);
Filipe Manana7e4a3f72020-05-18 12:15:09 +0100982 if (ret < 0) {
983 goto out;
984 } else if (ret > 0) {
Chris Masonf578d4b2007-10-25 15:42:56 -0400985 found_next = 1;
Chris Masonf578d4b2007-10-25 15:42:56 -0400986 goto insert;
Filipe Manana7e4a3f72020-05-18 12:15:09 +0100987 }
Filipe Manana27b9a812014-08-09 21:22:27 +0100988 slot = path->slots[0];
Chris Masonf578d4b2007-10-25 15:42:56 -0400989 }
990 btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
Chris Masond20f7042008-12-08 16:58:54 -0500991 if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
992 found_key.type != BTRFS_EXTENT_CSUM_KEY) {
Chris Masonf578d4b2007-10-25 15:42:56 -0400993 found_next = 1;
994 goto insert;
995 }
996 next_offset = found_key.offset;
997 found_next = 1;
Chris Masona429e512007-04-18 16:15:28 -0400998 goto insert;
999 }
1000
1001 /*
Filipe Mananacc146002020-05-18 12:15:00 +01001002 * At this point, we know the tree has a checksum item that ends at an
1003 * offset matching the start of the checksum range we want to insert.
1004 * We try to extend that item as much as possible and then add as many
1005 * checksums to it as they fit.
1006 *
1007 * First check if the leaf has enough free space for at least one
1008 * checksum. If it has go directly to the item extension code, otherwise
1009 * release the path and do a search for insertion before the extension.
Chris Masona429e512007-04-18 16:15:28 -04001010 */
Filipe Mananacc146002020-05-18 12:15:00 +01001011 if (btrfs_leaf_free_space(leaf) >= csum_size) {
1012 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1013 csum_offset = (bytenr - found_key.offset) >>
David Sterba265fdfa2020-07-01 21:19:09 +02001014 fs_info->sectorsize_bits;
Filipe Mananacc146002020-05-18 12:15:00 +01001015 goto extend_csum;
1016 }
1017
David Sterbab3b4aa72011-04-21 01:20:15 +02001018 btrfs_release_path(path);
Chris Mason6567e832007-04-16 09:22:45 -04001019 ret = btrfs_search_slot(trans, root, &file_key, path,
Josef Bacik607d4322008-12-02 07:17:45 -05001020 csum_size, 1);
Chris Mason6567e832007-04-16 09:22:45 -04001021 if (ret < 0)
Filipe Manana918cdf42020-05-18 12:15:18 +01001022 goto out;
Chris Mason459931e2008-12-10 09:10:46 -05001023
1024 if (ret > 0) {
1025 if (path->slots[0] == 0)
1026 goto insert;
1027 path->slots[0]--;
Chris Mason6567e832007-04-16 09:22:45 -04001028 }
Chris Mason459931e2008-12-10 09:10:46 -05001029
Chris Mason5f39d392007-10-15 16:14:19 -04001030 leaf = path->nodes[0];
1031 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
David Sterba265fdfa2020-07-01 21:19:09 +02001032 csum_offset = (bytenr - found_key.offset) >> fs_info->sectorsize_bits;
Chris Mason459931e2008-12-10 09:10:46 -05001033
David Sterba962a2982014-06-04 18:41:45 +02001034 if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
Chris Masond20f7042008-12-08 16:58:54 -05001035 found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001036 csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
Chris Mason6567e832007-04-16 09:22:45 -04001037 goto insert;
1038 }
Chris Mason459931e2008-12-10 09:10:46 -05001039
Filipe Mananacc146002020-05-18 12:15:00 +01001040extend_csum:
Liu Bo2f697dc2013-02-04 13:12:18 +00001041 if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
Josef Bacik607d4322008-12-02 07:17:45 -05001042 csum_size) {
Liu Bo2f697dc2013-02-04 13:12:18 +00001043 int extend_nr;
1044 u64 tmp;
1045 u32 diff;
Chris Mason459931e2008-12-10 09:10:46 -05001046
Miao Xief51a4a12013-06-19 10:36:09 +08001047 tmp = sums->len - total_bytes;
David Sterba265fdfa2020-07-01 21:19:09 +02001048 tmp >>= fs_info->sectorsize_bits;
Liu Bo2f697dc2013-02-04 13:12:18 +00001049 WARN_ON(tmp < 1);
1050
1051 extend_nr = max_t(int, 1, (int)tmp);
1052 diff = (csum_offset + extend_nr) * csum_size;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001053 diff = min(diff,
1054 MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
Chris Mason459931e2008-12-10 09:10:46 -05001055
Chris Mason5f39d392007-10-15 16:14:19 -04001056 diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
Filipe Mananacc146002020-05-18 12:15:00 +01001057 diff = min_t(u32, btrfs_leaf_free_space(leaf), diff);
Liu Bo2f697dc2013-02-04 13:12:18 +00001058 diff /= csum_size;
1059 diff *= csum_size;
Chris Mason459931e2008-12-10 09:10:46 -05001060
David Sterbac71dd882019-03-20 14:51:10 +01001061 btrfs_extend_item(path, diff);
Miao Xief51a4a12013-06-19 10:36:09 +08001062 ret = 0;
Chris Mason6567e832007-04-16 09:22:45 -04001063 goto csum;
1064 }
1065
1066insert:
David Sterbab3b4aa72011-04-21 01:20:15 +02001067 btrfs_release_path(path);
Chris Mason6567e832007-04-16 09:22:45 -04001068 csum_offset = 0;
Chris Masonf578d4b2007-10-25 15:42:56 -04001069 if (found_next) {
Liu Bo2f697dc2013-02-04 13:12:18 +00001070 u64 tmp;
Chris Masond20f7042008-12-08 16:58:54 -05001071
Miao Xief51a4a12013-06-19 10:36:09 +08001072 tmp = sums->len - total_bytes;
David Sterba265fdfa2020-07-01 21:19:09 +02001073 tmp >>= fs_info->sectorsize_bits;
Liu Bo2f697dc2013-02-04 13:12:18 +00001074 tmp = min(tmp, (next_offset - file_key.offset) >>
David Sterba265fdfa2020-07-01 21:19:09 +02001075 fs_info->sectorsize_bits);
Liu Bo2f697dc2013-02-04 13:12:18 +00001076
Seraphime Kirkovski50d04462016-12-15 14:38:28 +01001077 tmp = max_t(u64, 1, tmp);
1078 tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
Josef Bacik607d4322008-12-02 07:17:45 -05001079 ins_size = csum_size * tmp;
Chris Masonf578d4b2007-10-25 15:42:56 -04001080 } else {
Josef Bacik607d4322008-12-02 07:17:45 -05001081 ins_size = csum_size;
Chris Masonf578d4b2007-10-25 15:42:56 -04001082 }
Chris Mason5caf2a02007-04-02 11:20:42 -04001083 ret = btrfs_insert_empty_item(trans, root, path, &file_key,
Chris Masonf578d4b2007-10-25 15:42:56 -04001084 ins_size);
Chris Mason54aa1f42007-06-22 14:16:25 -04001085 if (ret < 0)
Filipe Manana918cdf42020-05-18 12:15:18 +01001086 goto out;
Dulshani Gunawardhanafae7f212013-10-31 10:30:08 +05301087 if (WARN_ON(ret != 0))
Filipe Manana918cdf42020-05-18 12:15:18 +01001088 goto out;
Chris Mason5f39d392007-10-15 16:14:19 -04001089 leaf = path->nodes[0];
Miao Xief51a4a12013-06-19 10:36:09 +08001090csum:
Chris Mason5f39d392007-10-15 16:14:19 -04001091 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
Miao Xief51a4a12013-06-19 10:36:09 +08001092 item_end = (struct btrfs_csum_item *)((unsigned char *)item +
1093 btrfs_item_size_nr(leaf, path->slots[0]));
Chris Mason509659c2007-05-10 12:36:17 -04001094 item = (struct btrfs_csum_item *)((unsigned char *)item +
Josef Bacik607d4322008-12-02 07:17:45 -05001095 csum_offset * csum_size);
Chris Masonb18c6682007-04-17 13:26:50 -04001096found:
David Sterba265fdfa2020-07-01 21:19:09 +02001097 ins_size = (u32)(sums->len - total_bytes) >> fs_info->sectorsize_bits;
Miao Xief51a4a12013-06-19 10:36:09 +08001098 ins_size *= csum_size;
1099 ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
1100 ins_size);
1101 write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
1102 ins_size);
Chris Masonaadfeb62008-01-29 09:10:27 -05001103
Johannes Thumshirn1e25a2e2019-05-22 10:19:01 +02001104 index += ins_size;
Miao Xief51a4a12013-06-19 10:36:09 +08001105 ins_size /= csum_size;
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001106 total_bytes += ins_size * fs_info->sectorsize;
Chris Masona6591712011-07-19 12:04:14 -04001107
Chris Mason5caf2a02007-04-02 11:20:42 -04001108 btrfs_mark_buffer_dirty(path->nodes[0]);
Chris Masone6dcd2d2008-07-17 12:53:50 -04001109 if (total_bytes < sums->len) {
David Sterbab3b4aa72011-04-21 01:20:15 +02001110 btrfs_release_path(path);
Chris Masonb9473432009-03-13 11:00:37 -04001111 cond_resched();
Chris Mason065631f2008-02-20 12:07:25 -05001112 goto again;
1113 }
Chris Mason53863232008-08-15 15:34:18 -04001114out:
Chris Mason5caf2a02007-04-02 11:20:42 -04001115 btrfs_free_path(path);
Chris Masonf254e522007-03-29 15:15:27 -04001116 return ret;
1117}
Filipe Manana7ffbb592014-06-09 03:48:05 +01001118
Nikolay Borisov9cdc5122017-02-20 13:51:02 +02001119void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
Filipe Manana7ffbb592014-06-09 03:48:05 +01001120 const struct btrfs_path *path,
1121 struct btrfs_file_extent_item *fi,
1122 const bool new_inline,
1123 struct extent_map *em)
1124{
David Sterba3ffbd682018-06-29 10:56:42 +02001125 struct btrfs_fs_info *fs_info = inode->root->fs_info;
Nikolay Borisov9cdc5122017-02-20 13:51:02 +02001126 struct btrfs_root *root = inode->root;
Filipe Manana7ffbb592014-06-09 03:48:05 +01001127 struct extent_buffer *leaf = path->nodes[0];
1128 const int slot = path->slots[0];
1129 struct btrfs_key key;
1130 u64 extent_start, extent_end;
1131 u64 bytenr;
1132 u8 type = btrfs_file_extent_type(leaf, fi);
1133 int compress_type = btrfs_file_extent_compression(leaf, fi);
1134
Filipe Manana7ffbb592014-06-09 03:48:05 +01001135 btrfs_item_key_to_cpu(leaf, &key, slot);
1136 extent_start = key.offset;
Filipe Mananaa5eeb3d2020-03-09 12:41:06 +00001137 extent_end = btrfs_file_extent_end(path);
Filipe Manana7ffbb592014-06-09 03:48:05 +01001138 em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
1139 if (type == BTRFS_FILE_EXTENT_REG ||
1140 type == BTRFS_FILE_EXTENT_PREALLOC) {
1141 em->start = extent_start;
1142 em->len = extent_end - extent_start;
1143 em->orig_start = extent_start -
1144 btrfs_file_extent_offset(leaf, fi);
1145 em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
1146 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1147 if (bytenr == 0) {
1148 em->block_start = EXTENT_MAP_HOLE;
1149 return;
1150 }
1151 if (compress_type != BTRFS_COMPRESS_NONE) {
1152 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
1153 em->compress_type = compress_type;
1154 em->block_start = bytenr;
1155 em->block_len = em->orig_block_len;
1156 } else {
1157 bytenr += btrfs_file_extent_offset(leaf, fi);
1158 em->block_start = bytenr;
1159 em->block_len = em->len;
1160 if (type == BTRFS_FILE_EXTENT_PREALLOC)
1161 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
1162 }
1163 } else if (type == BTRFS_FILE_EXTENT_INLINE) {
1164 em->block_start = EXTENT_MAP_INLINE;
1165 em->start = extent_start;
1166 em->len = extent_end - extent_start;
1167 /*
1168 * Initialize orig_start and block_len with the same values
1169 * as in inode.c:btrfs_get_extent().
1170 */
1171 em->orig_start = EXTENT_MAP_HOLE;
1172 em->block_len = (u64)-1;
1173 if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
1174 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
1175 em->compress_type = compress_type;
1176 }
1177 } else {
Jeff Mahoney0b246af2016-06-22 18:54:23 -04001178 btrfs_err(fs_info,
Nikolay Borisov9cdc5122017-02-20 13:51:02 +02001179 "unknown file extent item type %d, inode %llu, offset %llu, "
1180 "root %llu", type, btrfs_ino(inode), extent_start,
Filipe Manana7ffbb592014-06-09 03:48:05 +01001181 root->root_key.objectid);
1182 }
1183}
Filipe Mananaa5eeb3d2020-03-09 12:41:06 +00001184
1185/*
1186 * Returns the end offset (non-inclusive) of the file extent item the given path
1187 * points to. If it points to an inline extent, the returned offset is rounded
1188 * up to the sector size.
1189 */
1190u64 btrfs_file_extent_end(const struct btrfs_path *path)
1191{
1192 const struct extent_buffer *leaf = path->nodes[0];
1193 const int slot = path->slots[0];
1194 struct btrfs_file_extent_item *fi;
1195 struct btrfs_key key;
1196 u64 end;
1197
1198 btrfs_item_key_to_cpu(leaf, &key, slot);
1199 ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);
1200 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
1201
1202 if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
1203 end = btrfs_file_extent_ram_bytes(leaf, fi);
1204 end = ALIGN(key.offset + end, leaf->fs_info->sectorsize);
1205 } else {
1206 end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
1207 }
1208
1209 return end;
1210}
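
/*
 * Example of the rounding behaviour described above (illustrative values):
 * with a 4K sectorsize, an inline extent at key.offset 0 with ram_bytes 100
 * yields an end offset of 4096, while a regular extent at offset 8K with
 * num_bytes 12K yields 20K.
 */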