// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->len < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->len;
}

/* returns NULL if the insertion worked, or the node that was already
 * present in the tree at this offset
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

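/*
 * Trigger a filesystem panic when inserting an ordered extent collides with
 * one already in the per-inode tree, since overlapping ordered extents at
 * the same offset indicate an inconsistency we cannot recover from here.
 */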
static void ordered_data_tree_panic(struct inode *inode, int errno,
				    u64 offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	btrfs_panic(fs_info, errno,
		    "Inconsistency in ordered tree at offset %llu", offset);
}

/*
 * look for a given offset in the tree, and if it can't be found return the
 * first lesser offset
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

/*
 * helper to check if a given offset is inside a given entry
 */
static int offset_in_entry(struct btrfs_ordered_extent *entry, u64 file_offset)
{
	if (file_offset < entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->len <= file_offset)
		return 0;
	return 1;
}

/*
 * find the first ordered struct that has this offset, otherwise
 * the first one less than this offset
 */
static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
					  u64 file_offset)
{
	struct rb_root *root = &tree->tree;
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (tree->last) {
		entry = rb_entry(tree->last, struct btrfs_ordered_extent,
				 rb_node);
		if (offset_in_entry(entry, file_offset))
			return tree->last;
	}
	ret = __tree_search(root, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		tree->last = ret;
	return ret;
}

/* allocate and add a new ordered_extent into the per-inode tree.
 * file_offset is the logical offset in the file
 *
 * start is the disk block number of an extent already reserved in the
 * extent allocation tree
 *
 * len is the length of the extent
 *
 * The tree is given a single reference on the ordered extent that was
 * inserted.
 */
static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int dio, int compress_type)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry;

	tree = &BTRFS_I(inode)->ordered_tree;
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return -ENOMEM;

	entry->file_offset = file_offset;
	entry->start = start;
	entry->len = len;
	entry->disk_len = disk_len;
	entry->bytes_left = len;
	entry->inode = igrab(inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
		set_bit(type, &entry->flags);

	if (dio)
		set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);

	/* one ref for the tree */
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	init_completion(&entry->completion);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->trans_list);

	trace_btrfs_ordered_extent_add(inode, entry);

	spin_lock_irq(&tree->lock);
	node = tree_insert(&tree->tree, file_offset,
			   &entry->rb_node);
	if (node)
		ordered_data_tree_panic(inode, -EEXIST, file_offset);
	spin_unlock_irq(&tree->lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&BTRFS_I(inode)->lock);
	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
	spin_unlock(&BTRFS_I(inode)->lock);

	return 0;
}

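/*
 * The three helpers below are thin wrappers around
 * __btrfs_add_ordered_extent(); they differ only in the dio flag and the
 * compression type recorded on the new ordered extent (buffered, direct IO
 * and compressed writes respectively).
 */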
int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
			     u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
				 u64 start, u64 len, u64 disk_len, int type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 1,
					  BTRFS_COMPRESS_NONE);
}

int btrfs_add_ordered_extent_compress(struct inode *inode, u64 file_offset,
				      u64 start, u64 len, u64 disk_len,
				      int type, int compress_type)
{
	return __btrfs_add_ordered_extent(inode, file_offset, start, len,
					  disk_len, type, 0,
					  compress_type);
}

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished.  If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct inode *inode,
			   struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_ordered_inode_tree *tree;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irq(&tree->lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&tree->lock);
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO may span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 *
 * file_offset is updated to one byte past the range that is recorded as
 * complete.  This allows you to walk forward in the file.
 */
int btrfs_dec_test_first_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 *file_offset, u64 io_size, int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	int ret;
	unsigned long flags;
	u64 dec_end;
	u64 dec_start;
	u64 to_dec;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	node = tree_search(tree, *file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!offset_in_entry(entry, *file_offset)) {
		ret = 1;
		goto out;
	}

	dec_start = max(*file_offset, entry->file_offset);
	dec_end = min(*file_offset + io_size, entry->file_offset +
		      entry->len);
	*file_offset = dec_end;
	if (dec_start > dec_end) {
		btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
			   dec_start, dec_end);
	}
	to_dec = dec_end - dec_start;
	if (to_dec > entry->bytes_left) {
		btrfs_crit(fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, to_dec);
	}
	entry->bytes_left -= to_dec;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/*
 * this is used to account for finished IO across a given range
 * of the file.  The IO should not span ordered extents.  If
 * a given ordered_extent is completely done, 1 is returned, otherwise
 * 0.
 *
 * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
 * to make sure this function only returns 1 once for a given ordered extent.
 */
int btrfs_dec_test_ordered_pending(struct inode *inode,
				   struct btrfs_ordered_extent **cached,
				   u64 file_offset, u64 io_size, int uptodate)
{
	struct btrfs_ordered_inode_tree *tree;
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	int ret;

	tree = &BTRFS_I(inode)->ordered_tree;
	spin_lock_irqsave(&tree->lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = tree_search(tree, file_offset);
	if (!node) {
		ret = 1;
		goto out;
	}

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!offset_in_entry(entry, file_offset)) {
		ret = 1;
		goto out;
	}

	if (io_size > entry->bytes_left) {
		btrfs_crit(BTRFS_I(inode)->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);
	}
	entry->bytes_left -= io_size;
	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &entry->flags);

	if (entry->bytes_left == 0) {
		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	} else {
		ret = 1;
	}
out:
	if (!ret && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
	}
	spin_unlock_irqrestore(&tree->lock, flags);
	return ret == 0;
}

/* Needs to either be called under a log transaction or the log_mutex */
void btrfs_get_logged_extents(struct btrfs_inode *inode,
			      struct list_head *logged_list,
			      const loff_t start,
			      const loff_t end)
{
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct rb_node *n;
	struct rb_node *prev;

	tree = &inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	n = __tree_search(&tree->tree, end, &prev);
	if (!n)
		n = prev;
	for (; n; n = rb_prev(n)) {
		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		if (ordered->file_offset > end)
			continue;
		if (entry_end(ordered) <= start)
			break;
		if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;
		list_add(&ordered->log_list, logged_list);
		refcount_inc(&ordered->refs);
	}
	spin_unlock_irq(&tree->lock);
}

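/*
 * Drop the references taken by btrfs_get_logged_extents() on every ordered
 * extent still sitting on a private logged list, typically when the caller
 * decides not to hand the list over to the log tree after all.
 */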
void btrfs_put_logged_extents(struct list_head *logged_list)
{
	struct btrfs_ordered_extent *ordered;

	while (!list_empty(logged_list)) {
		ordered = list_first_entry(logged_list,
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}

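/*
 * Splice a privately collected logged list onto the log root's list for the
 * current log transaction, transferring the references along with it.
 */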
void btrfs_submit_logged_extents(struct list_head *logged_list,
				 struct btrfs_root *log)
{
	int index = log->log_transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	list_splice_tail(logged_list, &log->logged_list[index]);
	spin_unlock_irq(&log->log_extents_lock[index]);
}

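/*
 * Wait for the IO of every ordered extent attached to this log transaction.
 * Writeback is kicked off first for buffered extents that have not seen
 * IO_DONE yet, and extents that are still not COMPLETE are accounted in the
 * transaction's pending_ordered counter so the commit waits for them.
 */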
void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		struct inode *inode;
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		inode = ordered->inode;
		spin_unlock_irq(&log->log_extents_lock[index]);

		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
			u64 start = ordered->file_offset;
			u64 end = ordered->file_offset + ordered->len - 1;

			WARN_ON(!inode);
			filemap_fdatawrite_range(inode->i_mapping, start, end);
		}
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
						   &ordered->flags));

		/*
		 * In order to keep us from losing our ordered extent
		 * information when committing the transaction we have to make
		 * sure that any logged extents are completed when we go to
		 * commit the transaction.  To do this we simply increase the
		 * current transaction's pending_ordered counter and decrement
		 * it when the ordered extent completes.
		 */
		if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
			struct btrfs_ordered_inode_tree *tree;

			tree = &BTRFS_I(inode)->ordered_tree;
			spin_lock_irq(&tree->lock);
			if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
				set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
				atomic_inc(&trans->transaction->pending_ordered);
			}
			spin_unlock_irq(&tree->lock);
		}
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

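/*
 * Release every ordered extent still queued on this side of the log's
 * logged_list without waiting for its IO, so an abandoned log attempt does
 * not leak the references taken when the extents were collected.
 */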
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
	struct btrfs_ordered_extent *ordered;
	int index = transid % 2;

	spin_lock_irq(&log->log_extents_lock[index]);
	while (!list_empty(&log->logged_list[index])) {
		ordered = list_first_entry(&log->logged_list[index],
					   struct btrfs_ordered_extent,
					   log_list);
		list_del_init(&ordered->log_list);
		spin_unlock_irq(&log->log_extents_lock[index]);
		btrfs_put_ordered_extent(ordered);
		spin_lock_irq(&log->log_extents_lock[index]);
	}
	spin_unlock_irq(&log->log_extents_lock[index]);
}

/*
 * used to drop a reference on an ordered extent.  This will free
 * the extent if the last reference is dropped
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(entry->inode, entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->log_list));
		ASSERT(list_empty(&entry->trans_list));
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(entry->inode);
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * remove an ordered extent from the tree.  No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_inode_tree *tree;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	struct rb_node *node;
	bool dec_pending_ordered = false;

	/* This is paired with btrfs_add_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(btrfs_inode, entry->len, false);

	tree = &btrfs_inode->ordered_tree;
	spin_lock_irq(&tree->lock);
	node = &entry->rb_node;
	rb_erase(node, &tree->tree);
	RB_CLEAR_NODE(node);
	if (tree->last == node)
		tree->last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
		dec_pending_ordered = true;
	spin_unlock_irq(&tree->lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (dec_pending_ordered) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans);
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
}

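/*
 * Work item callback used by btrfs_wait_ordered_extents(): start IO on one
 * ordered extent, wait for it to finish, then signal the completion so the
 * submitter can reap it.
 */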
static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered->inode, ordered, 1);
	complete(&ordered->completion);
}

/*
 * wait for all the ordered extents in a root.  This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->start ||
		    ordered->start + ordered->disk_len <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_flush_delalloc_helper,
				btrfs_run_ordered_extent_work, NULL, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

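/*
 * Wait on ordered extents across every root that currently has any, up to
 * nr extents in total and limited to the given file range.  Returns the
 * number of ordered extents that were waited on.
 */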
u64 btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			     const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	struct list_head splice;
	u64 total_done = 0;
	u64 done;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_fs_root(root);
		total_done += done;

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX) {
			nr -= done;
		}
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);

	return total_done;
}

/*
 * Used to start IO or wait for a given ordered extent to finish.
 *
 * If wait is one, this effectively waits on page writeback for all the pages
 * in the extent, and it waits on the io completion code to insert
 * metadata into the btree corresponding to the extent
 */
void btrfs_start_ordered_extent(struct inode *inode,
				struct btrfs_ordered_extent *entry,
				int wait)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->len - 1;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * pages in the range can be dirty, clean or writeback.  We
	 * start IO on any dirty ones so the wait doesn't stall waiting
	 * for the flusher thread to find them
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (wait) {
		wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						 &entry->flags));
	}
}

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = INT_LIMIT(loff_t);
	} else {
		orig_end = start + len - 1;
		if (orig_end > INT_LIMIT(loff_t))
			orig_end = INT_LIMIT(loff_t);
	}

	/* start IO across the range first to instantiate any delalloc
	 * extents
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->len <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(inode, ordered, 1);
		end = ordered->file_offset;
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (ret || end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}

Chris Mason | eb84ae0 | 2008-07-17 13:53:27 -0400 | [diff] [blame] | 849 | /* |
| 850 | * find an ordered extent corresponding to file_offset. return NULL if |
| 851 | * nothing is found, otherwise take a reference on the extent and return it |
| 852 | */ |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 853 | struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode, |
| 854 | u64 file_offset) |
| 855 | { |
| 856 | struct btrfs_ordered_inode_tree *tree; |
| 857 | struct rb_node *node; |
| 858 | struct btrfs_ordered_extent *entry = NULL; |
| 859 | |
| 860 | tree = &BTRFS_I(inode)->ordered_tree; |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 861 | spin_lock_irq(&tree->lock); |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 862 | node = tree_search(tree, file_offset); |
| 863 | if (!node) |
| 864 | goto out; |
| 865 | |
| 866 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
| 867 | if (!offset_in_entry(entry, file_offset)) |
| 868 | entry = NULL; |
| 869 | if (entry) |
Elena Reshetova | e76edab | 2017-03-03 10:55:13 +0200 | [diff] [blame] | 870 | refcount_inc(&entry->refs); |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 871 | out: |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 872 | spin_unlock_irq(&tree->lock); |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 873 | return entry; |
| 874 | } |
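
/*
 * Illustrative sketch (editor's addition): typical use of
 * btrfs_lookup_ordered_extent().  The returned extent carries an extra
 * reference, so every successful lookup must be paired with
 * btrfs_put_ordered_extent().  The helper name range_has_ordered_io() is
 * hypothetical.
 */
static bool range_has_ordered_io(struct inode *inode, u64 file_offset)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_lookup_ordered_extent(inode, file_offset);
	if (!ordered)
		return false;

	/* ... inspect ordered->file_offset, ordered->len, ordered->flags ... */
	btrfs_put_ordered_extent(ordered);
	return true;
}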
| 875 | |
Josef Bacik | 4b46fce | 2010-05-23 11:00:55 -0400 | [diff] [blame] | 876 | /* Since the DIO code tries to lock a wide area, we need to look for any ordered |
| 877 | * extents that exist in the range, rather than just the start of the range. |
| 878 | */ |
Nikolay Borisov | a776c6f | 2017-02-20 13:50:49 +0200 | [diff] [blame] | 879 | struct btrfs_ordered_extent *btrfs_lookup_ordered_range( |
| 880 | struct btrfs_inode *inode, u64 file_offset, u64 len) |
Josef Bacik | 4b46fce | 2010-05-23 11:00:55 -0400 | [diff] [blame] | 881 | { |
| 882 | struct btrfs_ordered_inode_tree *tree; |
| 883 | struct rb_node *node; |
| 884 | struct btrfs_ordered_extent *entry = NULL; |
| 885 | |
Nikolay Borisov | a776c6f | 2017-02-20 13:50:49 +0200 | [diff] [blame] | 886 | tree = &inode->ordered_tree; |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 887 | spin_lock_irq(&tree->lock); |
Josef Bacik | 4b46fce | 2010-05-23 11:00:55 -0400 | [diff] [blame] | 888 | node = tree_search(tree, file_offset); |
| 889 | if (!node) { |
| 890 | node = tree_search(tree, file_offset + len); |
| 891 | if (!node) |
| 892 | goto out; |
| 893 | } |
| 894 | |
| 895 | while (1) { |
| 896 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
| 897 | if (range_overlaps(entry, file_offset, len)) |
| 898 | break; |
| 899 | |
| 900 | if (entry->file_offset >= file_offset + len) { |
| 901 | entry = NULL; |
| 902 | break; |
| 903 | } |
| 904 | entry = NULL; |
| 905 | node = rb_next(node); |
| 906 | if (!node) |
| 907 | break; |
| 908 | } |
| 909 | out: |
| 910 | if (entry) |
Elena Reshetova | e76edab | 2017-03-03 10:55:13 +0200 | [diff] [blame] | 911 | refcount_inc(&entry->refs); |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 912 | spin_unlock_irq(&tree->lock); |
Josef Bacik | 4b46fce | 2010-05-23 11:00:55 -0400 | [diff] [blame] | 913 | return entry; |
| 914 | } |
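
/*
 * Illustrative sketch (editor's addition): the DIO-style pattern the comment
 * above alludes to.  A wide range is checked for overlapping ordered extents
 * and, if one is found, the caller waits for it to complete before checking
 * again; any extent locks would have to be dropped before waiting.  The
 * helper name wait_for_overlapping_ordered() is hypothetical.
 */
static void wait_for_overlapping_ordered(struct btrfs_inode *inode,
					 u64 file_offset, u64 len)
{
	struct btrfs_ordered_extent *ordered;

	while (1) {
		ordered = btrfs_lookup_ordered_range(inode, file_offset, len);
		if (!ordered)
			break;	/* nothing in flight over this range */
		/* wait for this ordered extent to complete, then re-check */
		btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}
}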
| 915 | |
Filipe Manana | b659ef0 | 2015-03-31 14:16:52 +0100 | [diff] [blame] | 916 | bool btrfs_have_ordered_extents_in_range(struct inode *inode, |
| 917 | u64 file_offset, |
| 918 | u64 len) |
| 919 | { |
| 920 | struct btrfs_ordered_extent *oe; |
| 921 | |
Nikolay Borisov | a776c6f | 2017-02-20 13:50:49 +0200 | [diff] [blame] | 922 | oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len); |
Filipe Manana | b659ef0 | 2015-03-31 14:16:52 +0100 | [diff] [blame] | 923 | if (oe) { |
| 924 | btrfs_put_ordered_extent(oe); |
| 925 | return true; |
| 926 | } |
| 927 | return false; |
| 928 | } |
| 929 | |
Chris Mason | eb84ae0 | 2008-07-17 13:53:27 -0400 | [diff] [blame] | 930 | /* |
| 931 | * lookup and return any extent before 'file_offset'. NULL is returned |
| 932 | * if none is found |
| 933 | */ |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 934 | struct btrfs_ordered_extent * |
Chris Mason | d397712 | 2009-01-05 21:25:51 -0500 | [diff] [blame] | 935 | btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset) |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 936 | { |
| 937 | struct btrfs_ordered_inode_tree *tree; |
| 938 | struct rb_node *node; |
| 939 | struct btrfs_ordered_extent *entry = NULL; |
| 940 | |
| 941 | tree = &BTRFS_I(inode)->ordered_tree; |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 942 | spin_lock_irq(&tree->lock); |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 943 | node = tree_search(tree, file_offset); |
| 944 | if (!node) |
| 945 | goto out; |
| 946 | |
| 947 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
Elena Reshetova | e76edab | 2017-03-03 10:55:13 +0200 | [diff] [blame] | 948 | refcount_inc(&entry->refs); |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 949 | out: |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 950 | spin_unlock_irq(&tree->lock); |
Chris Mason | e6dcd2d | 2008-07-17 12:53:50 -0400 | [diff] [blame] | 951 | return entry; |
| 952 | } |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 953 | |
Chris Mason | eb84ae0 | 2008-07-17 13:53:27 -0400 | [diff] [blame] | 954 | /* |
| 955 | * After an extent is done, call this to conditionally update the on disk |
| 956 | * i_size. i_size is updated to cover any fully written part of the file. |
| 957 | */ |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 958 | int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 959 | struct btrfs_ordered_extent *ordered) |
| 960 | { |
| 961 | struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 962 | u64 disk_i_size; |
| 963 | u64 new_i_size; |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 964 | u64 i_size = i_size_read(inode); |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 965 | struct rb_node *node; |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 966 | struct rb_node *prev = NULL; |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 967 | struct btrfs_ordered_extent *test; |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 968 | int ret = 1; |
Wang Xiaoguang | c0d2f61 | 2016-06-22 09:57:01 +0800 | [diff] [blame] | 969 | u64 orig_offset = offset; |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 970 | |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 971 | spin_lock_irq(&tree->lock); |
Josef Bacik | 77cef2e | 2013-08-29 13:57:21 -0400 | [diff] [blame] | 972 | if (ordered) { |
| 973 | offset = entry_end(ordered); |
| 974 | if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) |
| 975 | offset = min(offset, |
| 976 | ordered->file_offset + |
| 977 | ordered->truncated_len); |
| 978 | } else { |
Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 979 | offset = ALIGN(offset, btrfs_inode_sectorsize(inode)); |
Josef Bacik | 77cef2e | 2013-08-29 13:57:21 -0400 | [diff] [blame] | 980 | } |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 981 | disk_i_size = BTRFS_I(inode)->disk_i_size; |
| 982 | |
Liu Bo | 19fd2df | 2016-12-01 13:01:02 -0800 | [diff] [blame] | 983 | /* |
| 984 | 	 * Handle truncate. |
| 985 | 	 * If ordered is NULL, we are called from the truncate path and |
| 986 | 	 * disk_i_size must be pulled back to the truncation offset. If it is |
| 987 | 	 * not NULL, we are called from endio and disk_i_size will be updated |
| 988 | 	 * by either truncate itself or any in-flight IOs inside disk_i_size. |
| 989 | 	 * |
| 990 | 	 * Because btrfs_setsize() may set i_size from disk_i_size if truncate |
| 991 | 	 * fails somehow, we need to keep disk_i_size precise by updating it |
| 992 | 	 * as usual. |
| 993 | 	 */ |
| 994 | if (!ordered && disk_i_size > i_size) { |
Wang Xiaoguang | c0d2f61 | 2016-06-22 09:57:01 +0800 | [diff] [blame] | 995 | BTRFS_I(inode)->disk_i_size = orig_offset; |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 996 | ret = 0; |
| 997 | goto out; |
| 998 | } |
| 999 | |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1000 | /* |
| 1001 | * if the disk i_size is already at the inode->i_size, or |
| 1002 | * this ordered extent is inside the disk i_size, we're done |
| 1003 | */ |
Josef Bacik | 5d1f402 | 2013-01-30 14:17:31 -0500 | [diff] [blame] | 1004 | if (disk_i_size == i_size) |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1005 | goto out; |
Josef Bacik | 5d1f402 | 2013-01-30 14:17:31 -0500 | [diff] [blame] | 1006 | |
| 1007 | /* |
| 1008 | * We still need to update disk_i_size if outstanding_isize is greater |
| 1009 | * than disk_i_size. |
| 1010 | */ |
| 1011 | if (offset <= disk_i_size && |
| 1012 | (!ordered || ordered->outstanding_isize <= disk_i_size)) |
| 1013 | goto out; |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1014 | |
| 1015 | /* |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1016 | * walk backward from this ordered extent to disk_i_size. |
| 1017 | * if we find an ordered extent then we can't update disk i_size |
| 1018 | * yet |
| 1019 | */ |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 1020 | if (ordered) { |
| 1021 | node = rb_prev(&ordered->rb_node); |
| 1022 | } else { |
| 1023 | prev = tree_search(tree, offset); |
| 1024 | /* |
| 1025 | 		 * we insert file extents without involving ordered structs, |
| 1026 | 		 * so there should be no ordered struct covering this offset |
| 1027 | */ |
| 1028 | if (prev) { |
| 1029 | test = rb_entry(prev, struct btrfs_ordered_extent, |
| 1030 | rb_node); |
| 1031 | BUG_ON(offset_in_entry(test, offset)); |
| 1032 | } |
| 1033 | node = prev; |
| 1034 | } |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 1035 | for (; node; node = rb_prev(node)) { |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1036 | test = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 1037 | |
Adam Buchbinder | bb7ab3b | 2016-03-04 11:23:12 -0800 | [diff] [blame] | 1038 | /* We treat this entry as if it doesn't exist */ |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 1039 | if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags)) |
| 1040 | continue; |
Liu Bo | 62c821a | 2016-12-13 12:51:51 -0800 | [diff] [blame] | 1041 | |
| 1042 | if (entry_end(test) <= disk_i_size) |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1043 | break; |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 1044 | if (test->file_offset >= i_size) |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1045 | break; |
Liu Bo | 62c821a | 2016-12-13 12:51:51 -0800 | [diff] [blame] | 1046 | |
| 1047 | /* |
| 1048 | 		 * We don't update disk_i_size now, so record this not-yet-applied |
| 1049 | 		 * i_size here, or we will lose track of the real i_size. |
| 1050 | */ |
| 1051 | if (test->outstanding_isize < offset) |
| 1052 | test->outstanding_isize = offset; |
| 1053 | if (ordered && |
| 1054 | ordered->outstanding_isize > test->outstanding_isize) |
| 1055 | test->outstanding_isize = ordered->outstanding_isize; |
| 1056 | goto out; |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1057 | } |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 1058 | new_i_size = min_t(u64, offset, i_size); |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1059 | |
| 1060 | /* |
Miao Xie | b9a8cc5 | 2012-09-06 04:01:21 -0600 | [diff] [blame] | 1061 | 	 * Some ordered extents may have completed before the current one, and |
| 1062 | * we hold the real i_size in ->outstanding_isize. |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1063 | */ |
Miao Xie | b9a8cc5 | 2012-09-06 04:01:21 -0600 | [diff] [blame] | 1064 | if (ordered && ordered->outstanding_isize > new_i_size) |
| 1065 | new_i_size = min_t(u64, ordered->outstanding_isize, i_size); |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1066 | BTRFS_I(inode)->disk_i_size = new_i_size; |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 1067 | ret = 0; |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1068 | out: |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 1069 | /* |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 1070 | * We need to do this because we can't remove ordered extents until |
| 1071 | 	 * after the disk_i_size has been updated and then the inode has been |
| 1072 | * updated to reflect the change, so we need to tell anybody who finds |
| 1073 | * this ordered extent that we've already done all the real work, we |
| 1074 | * just haven't completed all the other work. |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 1075 | */ |
| 1076 | if (ordered) |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 1077 | set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags); |
| 1078 | spin_unlock_irq(&tree->lock); |
Yan, Zheng | c216775 | 2009-11-12 09:34:21 +0000 | [diff] [blame] | 1079 | return ret; |
Chris Mason | dbe674a | 2008-07-17 12:54:05 -0400 | [diff] [blame] | 1080 | } |
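
/*
 * Illustrative sketch (editor's addition): how an endio-style caller might
 * use btrfs_ordered_update_i_size().  When an ordered extent is passed, the
 * offset argument is ignored and the end of the ordered extent (capped by
 * any truncation) is used instead; a return value of 0 means disk_i_size
 * was changed and the inode item should be written out.  The wrapper name
 * finish_one_ordered() is hypothetical and transaction handling is omitted.
 */
static void finish_one_ordered(struct inode *inode,
			       struct btrfs_ordered_extent *ordered)
{
	if (btrfs_ordered_update_i_size(inode, 0, ordered) == 0) {
		/* disk_i_size moved: persist the updated inode item here */
	}
}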
Chris Mason | ba1da2f | 2008-07-17 12:54:15 -0400 | [diff] [blame] | 1081 | |
Chris Mason | eb84ae0 | 2008-07-17 13:53:27 -0400 | [diff] [blame] | 1082 | /* |
| 1083 | * search the ordered extents for one corresponding to 'offset' and |
| 1084 | * try to find a checksum. This is used because we allow pages to |
| 1085 | * be reclaimed before their checksum is actually put into the btree |
| 1086 | */ |
Chris Mason | d20f704 | 2008-12-08 16:58:54 -0500 | [diff] [blame] | 1087 | int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, |
Miao Xie | e4100d9 | 2013-04-05 07:20:56 +0000 | [diff] [blame] | 1088 | u32 *sum, int len) |
Chris Mason | ba1da2f | 2008-07-17 12:54:15 -0400 | [diff] [blame] | 1089 | { |
| 1090 | struct btrfs_ordered_sum *ordered_sum; |
Chris Mason | ba1da2f | 2008-07-17 12:54:15 -0400 | [diff] [blame] | 1091 | struct btrfs_ordered_extent *ordered; |
| 1092 | struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; |
Chris Mason | 3edf7d3 | 2008-07-18 06:17:13 -0400 | [diff] [blame] | 1093 | unsigned long num_sectors; |
| 1094 | unsigned long i; |
Jeff Mahoney | da17066 | 2016-06-15 09:22:56 -0400 | [diff] [blame] | 1095 | u32 sectorsize = btrfs_inode_sectorsize(inode); |
Miao Xie | e4100d9 | 2013-04-05 07:20:56 +0000 | [diff] [blame] | 1096 | int index = 0; |
Chris Mason | ba1da2f | 2008-07-17 12:54:15 -0400 | [diff] [blame] | 1097 | |
| 1098 | ordered = btrfs_lookup_ordered_extent(inode, offset); |
| 1099 | if (!ordered) |
Miao Xie | e4100d9 | 2013-04-05 07:20:56 +0000 | [diff] [blame] | 1100 | return 0; |
Chris Mason | ba1da2f | 2008-07-17 12:54:15 -0400 | [diff] [blame] | 1101 | |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 1102 | spin_lock_irq(&tree->lock); |
Qinghuang Feng | c6e3087 | 2009-01-21 10:59:08 -0500 | [diff] [blame] | 1103 | list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { |
Miao Xie | e4100d9 | 2013-04-05 07:20:56 +0000 | [diff] [blame] | 1104 | if (disk_bytenr >= ordered_sum->bytenr && |
| 1105 | disk_bytenr < ordered_sum->bytenr + ordered_sum->len) { |
| 1106 | i = (disk_bytenr - ordered_sum->bytenr) >> |
| 1107 | inode->i_sb->s_blocksize_bits; |
Miao Xie | e4100d9 | 2013-04-05 07:20:56 +0000 | [diff] [blame] | 1108 | num_sectors = ordered_sum->len >> |
| 1109 | inode->i_sb->s_blocksize_bits; |
Miao Xie | f51a4a1 | 2013-06-19 10:36:09 +0800 | [diff] [blame] | 1110 | num_sectors = min_t(int, len - index, num_sectors - i); |
| 1111 | memcpy(sum + index, ordered_sum->sums + i, |
| 1112 | num_sectors); |
| 1113 | |
| 1114 | index += (int)num_sectors; |
| 1115 | if (index == len) |
| 1116 | goto out; |
| 1117 | disk_bytenr += num_sectors * sectorsize; |
Chris Mason | ba1da2f | 2008-07-17 12:54:15 -0400 | [diff] [blame] | 1118 | } |
| 1119 | } |
| 1120 | out: |
Josef Bacik | 5fd0204 | 2012-05-02 14:00:54 -0400 | [diff] [blame] | 1121 | spin_unlock_irq(&tree->lock); |
Chris Mason | 8964222 | 2008-07-24 09:41:53 -0400 | [diff] [blame] | 1122 | btrfs_put_ordered_extent(ordered); |
Miao Xie | e4100d9 | 2013-04-05 07:20:56 +0000 | [diff] [blame] | 1123 | return index; |
Chris Mason | ba1da2f | 2008-07-17 12:54:15 -0400 | [diff] [blame] | 1124 | } |
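
/*
 * Illustrative sketch (editor's addition): consulting the ordered extent's
 * in-memory checksum list before falling back to the checksum tree, similar
 * in spirit to the read-time csum lookup path.  The helper name
 * lookup_sums_for_read() is hypothetical; btrfs_find_ordered_sum() returns
 * the number of checksums it copied into 'sums' (0 if no ordered extent
 * covers 'offset').
 */
static int lookup_sums_for_read(struct inode *inode, u64 offset,
				u64 disk_bytenr, u32 *sums, int nblocks)
{
	int found;

	found = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
				       sums, nblocks);
	if (found == nblocks)
		return 0;	/* everything came from the ordered extent */

	/* ... look up the remaining checksums in the csum tree ... */
	return 0;
}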
| 1125 | |
Miao Xie | 6352b91 | 2012-09-06 04:01:51 -0600 | [diff] [blame] | 1126 | int __init ordered_data_init(void) |
| 1127 | { |
| 1128 | btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent", |
| 1129 | sizeof(struct btrfs_ordered_extent), 0, |
Nikolay Borisov | fba4b69 | 2016-06-23 21:17:08 +0300 | [diff] [blame] | 1130 | SLAB_MEM_SPREAD, |
Miao Xie | 6352b91 | 2012-09-06 04:01:51 -0600 | [diff] [blame] | 1131 | NULL); |
| 1132 | if (!btrfs_ordered_extent_cache) |
| 1133 | return -ENOMEM; |
Miao Xie | 25287e0 | 2012-10-25 09:31:03 +0000 | [diff] [blame] | 1134 | |
Miao Xie | 6352b91 | 2012-09-06 04:01:51 -0600 | [diff] [blame] | 1135 | return 0; |
| 1136 | } |
| 1137 | |
David Sterba | e67c718 | 2018-02-19 17:24:18 +0100 | [diff] [blame] | 1138 | void __cold ordered_data_exit(void) |
Miao Xie | 6352b91 | 2012-09-06 04:01:51 -0600 | [diff] [blame] | 1139 | { |
Kinglong Mee | 5598e90 | 2016-01-29 21:36:35 +0800 | [diff] [blame] | 1140 | kmem_cache_destroy(btrfs_ordered_extent_cache); |
Miao Xie | 6352b91 | 2012-09-06 04:01:51 -0600 | [diff] [blame] | 1141 | } |
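
/*
 * Illustrative sketch (editor's addition): the slab cache created above is
 * set up once at module load and torn down at unload.  The pairing below
 * mirrors how a module init path might use it; demo_init()/demo_exit() are
 * hypothetical names, the real callers live in the filesystem's module
 * init/exit code.
 */
static int __init demo_init(void)
{
	int ret;

	ret = ordered_data_init();	/* create the ordered extent cache */
	if (ret)
		return ret;		/* -ENOMEM if allocation failed */
	/* ... register the rest of the filesystem here ... */
	return 0;
}

static void __exit demo_exit(void)
{
	/* ... unregister everything else first ... */
	ordered_data_exit();		/* destroy the ordered extent cache */
}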