/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}

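/*
 * Note: the checksums for the compressed pages live in the trailing array
 * that compressed_bio_size() reserves behind struct compressed_bio; cb->sums
 * is the first element and check_compressed_csum() below walks one entry per
 * compressed page.  As a rough sizing sketch (assuming 4K sectors and 4-byte
 * crc32c checksums, the only csum type at this point): a 128K compressed
 * extent needs 128K / 4K = 32 checksums, i.e. 128 extra bytes on top of the
 * struct itself.
 */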
static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
						    *cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;

	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
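/*
 * Rough flow, summarizing the body below: the last bio to complete records
 * the mirror number for read-repair, verifies the per-page checksums via
 * check_compressed_csum() (unless an error was already seen), runs
 * btrfs_decompress_bio(), and only then completes cb->orig_bio.
 */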
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
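/*
 * A rough sketch of the contract, as used below: @start/@len describe the
 * uncompressed byte range in the file, @disk_start is where the compressed
 * data begins on disk, and @compressed_pages holds @compressed_len bytes
 * spread over @nr_pages pages.  cb->pending_bios starts at 1 to cover the
 * final bio submitted after the loop; every intermediate bio takes an extra
 * reference before submission, and the last end_io callback to drop the
 * count frees the compressed_bio.
 */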
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages,
				 unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							      PAGE_SIZE,
							      bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio_put(bio);

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	bio_put(bio);
	return 0;
}

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

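/*
 * Readahead helper, summarized from the body below: starting at the end of
 * cb->orig_bio, allocate and lock file pages up to @compressed_end, verify
 * via the extent map that they still belong to this compressed extent, and
 * add them to the original bio so the whole extent is decompressed into the
 * page cache in one go.  It backs off after more than four pages are found
 * already cached, or as soon as a page cannot be added to the bio.
 */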
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
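/*
 * A few invariants worth keeping in mind while reading the function below:
 * cur_disk_byte starts at bi_sector << 9, the number of compressed pages is
 * DIV_ROUND_UP(compressed_len, PAGE_SIZE), and cb->len is taken from the
 * original bio's bi_size (refreshed after add_ra_bio_pages() may have grown
 * it).  The sums pointer advances by one checksum per sectorsize of
 * compressed data submitted in each bio.
 */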
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, i.e. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED * \
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
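/*
 * Worked out, assuming BTRFS_MAX_UNCOMPRESSED is the 128K compression chunk
 * size defined in compression.h:
 *
 *	MAX_SAMPLE_SIZE = 131072 * 16 / 256 = 8192 bytes
 *
 * which matches the 8KB bound above: at most 512 sampling locations, each
 * contributing 16 consecutive bytes, so each of the 256 byte-value buckets
 * would see roughly 8192 / 256 = 32 samples in the uniform case.
 */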

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	struct list_head list;
};
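/*
 * Rough footprint per heuristic workspace, assuming the defines above:
 * up to MAX_SAMPLE_SIZE (8K) for the sample buffer plus
 * BUCKET_SIZE * sizeof(struct bucket_item) = 256 * 4 = 1K for the buckets.
 */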

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}
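/*
 * Note on the error path above: the partially set up workspace is torn down
 * through free_heuristic_ws(), which is safe because kvfree()/kfree() accept
 * NULL for the members that were never allocated.
 */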

struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};
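/*
 * The array above is indexed by compression type minus one (see
 * __find_workspace() below), so with the usual BTRFS_COMPRESS_* values
 * zlib/lzo/zstd map to slots 0/1/2, and BTRFS_COMPRESS_TYPES is expected
 * to match the number of entries here.
 */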

void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation provides a forward progress guarantee and we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws = &btrfs_heuristic_ws.idle_ws;
		ws_lock = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait = &btrfs_heuristic_ws.ws_wait;
		free_ws = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws = &btrfs_comp_ws[idx].idle_ws;
		ws_lock = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait = &btrfs_comp_ws[idx].ws_wait;
		free_ws = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;

	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
David Sterba | 6ac10a6 | 2016-04-27 02:15:15 +0200 | [diff] [blame] | 922 | atomic_dec(total_ws); |
Byongho Lee | d918764 | 2015-10-14 14:05:24 +0900 | [diff] [blame] | 923 | wake_up(ws_wait); |
David Sterba | e721e49 | 2016-04-27 02:41:17 +0200 | [diff] [blame] | 924 | |
| 925 | /* |
| 926 | * Do not return the error but go back to waiting. There's a |
| 927 | * workspace preallocated for each type and the compression |
| 928 | * time is bounded so we get to a workspace eventually. This |
| 929 | * makes our caller's life easier. |
David Sterba | 52356716 | 2016-04-27 03:07:39 +0200 | [diff] [blame] | 930 | * |
| 931 | * To prevent silent and low-probability deadlocks (when the |
| 932 | * initial preallocation fails), check if there are any |
| 933 | * workspaces at all. |
David Sterba | e721e49 | 2016-04-27 02:41:17 +0200 | [diff] [blame] | 934 | */ |
David Sterba | 52356716 | 2016-04-27 03:07:39 +0200 | [diff] [blame] | 935 | if (atomic_read(total_ws) == 0) { |
| 936 | static DEFINE_RATELIMIT_STATE(_rs, |
| 937 | /* once per minute */ 60 * HZ, |
| 938 | /* no burst */ 1); |
| 939 | |
| 940 | if (__ratelimit(&_rs)) { |
Jeff Mahoney | ab8d0fc | 2016-09-20 10:05:02 -0400 | [diff] [blame] | 941 | pr_warn("BTRFS: no compression workspaces, low memory, retrying\n"); |
David Sterba | 52356716 | 2016-04-27 03:07:39 +0200 | [diff] [blame] | 942 | } |
| 943 | } |
David Sterba | e721e49 | 2016-04-27 02:41:17 +0200 | [diff] [blame] | 944 | goto again; |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 945 | } |
| 946 | return workspace; |
| 947 | } |
| 948 | |
Timofey Titovets | 4e439a0 | 2017-09-28 17:33:36 +0300 | [diff] [blame] | 949 | static struct list_head *find_workspace(int type) |
| 950 | { |
| 951 | return __find_workspace(type, false); |
| 952 | } |
| 953 | |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 954 | /* |
| 955 | * put a workspace struct back on the list or free it if we have enough |
| 956 | * idle ones sitting around |
| 957 | */ |
Timofey Titovets | 4e439a0 | 2017-09-28 17:33:36 +0300 | [diff] [blame] | 958 | static void __free_workspace(int type, struct list_head *workspace, |
| 959 | bool heuristic) |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 960 | { |
| 961 | int idx = type - 1; |
Timofey Titovets | 4e439a0 | 2017-09-28 17:33:36 +0300 | [diff] [blame] | 962 | struct list_head *idle_ws; |
| 963 | spinlock_t *ws_lock; |
| 964 | atomic_t *total_ws; |
| 965 | wait_queue_head_t *ws_wait; |
| 966 | int *free_ws; |
| 967 | |
| 968 | if (heuristic) { |
| 969 | idle_ws = &btrfs_heuristic_ws.idle_ws; |
| 970 | ws_lock = &btrfs_heuristic_ws.ws_lock; |
| 971 | total_ws = &btrfs_heuristic_ws.total_ws; |
| 972 | ws_wait = &btrfs_heuristic_ws.ws_wait; |
| 973 | free_ws = &btrfs_heuristic_ws.free_ws; |
| 974 | } else { |
| 975 | idle_ws = &btrfs_comp_ws[idx].idle_ws; |
| 976 | ws_lock = &btrfs_comp_ws[idx].ws_lock; |
| 977 | total_ws = &btrfs_comp_ws[idx].total_ws; |
| 978 | ws_wait = &btrfs_comp_ws[idx].ws_wait; |
| 979 | free_ws = &btrfs_comp_ws[idx].free_ws; |
| 980 | } |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 981 | |
Byongho Lee | d918764 | 2015-10-14 14:05:24 +0900 | [diff] [blame] | 982 | spin_lock(ws_lock); |
Nick Terrell | 26b28dc | 2017-06-29 10:57:26 -0700 | [diff] [blame] | 983 | if (*free_ws <= num_online_cpus()) { |
Byongho Lee | d918764 | 2015-10-14 14:05:24 +0900 | [diff] [blame] | 984 | list_add(workspace, idle_ws); |
David Sterba | 6ac10a6 | 2016-04-27 02:15:15 +0200 | [diff] [blame] | 985 | (*free_ws)++; |
Byongho Lee | d918764 | 2015-10-14 14:05:24 +0900 | [diff] [blame] | 986 | spin_unlock(ws_lock); |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 987 | goto wake; |
| 988 | } |
Byongho Lee | d918764 | 2015-10-14 14:05:24 +0900 | [diff] [blame] | 989 | spin_unlock(ws_lock); |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 990 | |
Timofey Titovets | 4e439a0 | 2017-09-28 17:33:36 +0300 | [diff] [blame] | 991 | if (heuristic) |
| 992 | free_heuristic_ws(workspace); |
| 993 | else |
| 994 | btrfs_compress_op[idx]->free_workspace(workspace); |
David Sterba | 6ac10a6 | 2016-04-27 02:15:15 +0200 | [diff] [blame] | 995 | atomic_dec(total_ws); |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 996 | wake: |
David Sterba | a83342a | 2015-02-16 19:36:47 +0100 | [diff] [blame] | 997 | /* |
| 998 | * Make sure counter is updated before we wake up waiters. |
| 999 | */ |
Josef Bacik | 66657b3 | 2012-08-01 15:36:24 -0400 | [diff] [blame] | 1000 | smp_mb(); |
Byongho Lee | d918764 | 2015-10-14 14:05:24 +0900 | [diff] [blame] | 1001 | if (waitqueue_active(ws_wait)) |
| 1002 | wake_up(ws_wait); |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1003 | } |
| 1004 | |
Timofey Titovets | 4e439a0 | 2017-09-28 17:33:36 +0300 | [diff] [blame] | 1005 | static void free_workspace(int type, struct list_head *ws) |
| 1006 | { |
| 1007 | return __free_workspace(type, ws, false); |
| 1008 | } |
| 1009 | |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1010 | /* |
| 1011 | * cleanup function for module exit |
| 1012 | */ |
| 1013 | static void free_workspaces(void) |
| 1014 | { |
| 1015 | struct list_head *workspace; |
| 1016 | int i; |
| 1017 | |
Timofey Titovets | 4e439a0 | 2017-09-28 17:33:36 +0300 | [diff] [blame] | 1018 | while (!list_empty(&btrfs_heuristic_ws.idle_ws)) { |
| 1019 | workspace = btrfs_heuristic_ws.idle_ws.next; |
| 1020 | list_del(workspace); |
| 1021 | free_heuristic_ws(workspace); |
| 1022 | atomic_dec(&btrfs_heuristic_ws.total_ws); |
| 1023 | } |
| 1024 | |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1025 | for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { |
Byongho Lee | d918764 | 2015-10-14 14:05:24 +0900 | [diff] [blame] | 1026 | while (!list_empty(&btrfs_comp_ws[i].idle_ws)) { |
| 1027 | workspace = btrfs_comp_ws[i].idle_ws.next; |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1028 | list_del(workspace); |
| 1029 | btrfs_compress_op[i]->free_workspace(workspace); |
David Sterba | 6ac10a6 | 2016-04-27 02:15:15 +0200 | [diff] [blame] | 1030 | atomic_dec(&btrfs_comp_ws[i].total_ws); |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1031 | } |
| 1032 | } |
| 1033 | } |
| 1034 | |
| 1035 | /* |
David Sterba | 38c3146 | 2017-02-14 19:04:07 +0100 | [diff] [blame] | 1036 | * Given an address space and start and length, compress the bytes into @pages |
| 1037 | * that are allocated on demand. |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1038 | * |
David Sterba | f51d2b5 | 2017-09-15 17:36:57 +0200 | [diff] [blame] | 1039 | * @type_level is encoded algorithm and level, where level 0 means whatever |
| 1040 | * default the algorithm chooses and is opaque here; |
| 1041 | * - the compression algorithm is stored in bits 0-3 |
| 1042 | * - the level is stored in bits 4-7 |
| 1043 | * |
David Sterba | 4d3a800 | 2017-02-14 19:04:07 +0100 | [diff] [blame] | 1044 | * @out_pages is an in/out parameter: it holds the maximum number of pages to |
| 1045 | * allocate and returns the number of pages actually allocated |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1046 | * |
David Sterba | 38c3146 | 2017-02-14 19:04:07 +0100 | [diff] [blame] | 1047 | * @total_in is used to return the number of bytes actually read. It |
| 1048 | * may be smaller than the input length if we had to exit early because we |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1049 | * ran out of room in the pages array or because we crossed the |
| 1050 | * max_out threshold. |
| 1051 | * |
David Sterba | 38c3146 | 2017-02-14 19:04:07 +0100 | [diff] [blame] | 1052 | * @total_out is an in/out parameter, must be set to the input length and will |
| 1053 | * be also used to return the total number of compressed bytes |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1054 | * |
David Sterba | 38c3146 | 2017-02-14 19:04:07 +0100 | [diff] [blame] | 1055 | * @max_out tells us the max number of bytes that we're allowed to |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1056 | * stuff into pages |
| 1057 | */ |
David Sterba | f51d2b5 | 2017-09-15 17:36:57 +0200 | [diff] [blame] | 1058 | int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping, |
David Sterba | 38c3146 | 2017-02-14 19:04:07 +0100 | [diff] [blame] | 1059 | u64 start, struct page **pages, |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1060 | unsigned long *out_pages, |
| 1061 | unsigned long *total_in, |
David Sterba | e5d7490 | 2017-02-14 19:45:05 +0100 | [diff] [blame] | 1062 | unsigned long *total_out) |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1063 | { |
| 1064 | struct list_head *workspace; |
| 1065 | int ret; |
David Sterba | f51d2b5 | 2017-09-15 17:36:57 +0200 | [diff] [blame] | 1066 | int type = type_level & 0xF; |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1067 | |
| 1068 | workspace = find_workspace(type); |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1069 | |
David Sterba | f51d2b5 | 2017-09-15 17:36:57 +0200 | [diff] [blame] | 1070 | btrfs_compress_op[type - 1]->set_level(workspace, type_level); |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1071 | ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, |
David Sterba | 38c3146 | 2017-02-14 19:04:07 +0100 | [diff] [blame] | 1072 | start, pages, |
David Sterba | 4d3a800 | 2017-02-14 19:04:07 +0100 | [diff] [blame] | 1073 | out_pages, |
David Sterba | e5d7490 | 2017-02-14 19:45:05 +0100 | [diff] [blame] | 1074 | total_in, total_out); |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1075 | free_workspace(type, workspace); |
| 1076 | return ret; |
| 1077 | } |
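As a side note on the @type_level packing documented above this function, here is a minimal hypothetical sketch of how a caller could encode it; the helper name is illustrative only and the only assumptions are the bit layout described in the comment and that level 0 means the algorithm's default:

/* Hypothetical illustration, not part of this file. */
static inline unsigned int example_pack_type_level(unsigned int type,
						   unsigned int level)
{
	/* algorithm type in bits 0-3, level in bits 4-7, level 0 == default */
	return (type & 0xF) | ((level & 0xF) << 4);
}
/* e.g. assuming BTRFS_COMPRESS_ZLIB == 1, zlib at level 3 packs to 0x31 */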
| 1078 | |
| 1079 | /* |
| 1080 | * pages_in is an array of pages with compressed data. |
| 1081 | * |
| 1082 | * disk_start is the starting logical offset of this array in the file |
| 1083 | * |
Christoph Hellwig | 974b1ad | 2016-11-25 09:07:46 +0100 | [diff] [blame] | 1084 | * orig_bio contains the pages from the file that we want to decompress into |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1085 | * |
| 1086 | * srclen is the number of bytes in pages_in |
| 1087 | * |
| 1088 | * The basic idea is that we have a bio that was created by readpages. |
| 1089 | * The pages in the bio are for the uncompressed data, and they may not |
| 1090 | * be contiguous. They all correspond to the range of bytes covered by |
| 1091 | * the compressed extent. |
| 1092 | */ |
Anand Jain | 8140dc3 | 2017-05-26 15:44:58 +0800 | [diff] [blame] | 1093 | static int btrfs_decompress_bio(struct compressed_bio *cb) |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1094 | { |
| 1095 | struct list_head *workspace; |
| 1096 | int ret; |
Anand Jain | 8140dc3 | 2017-05-26 15:44:58 +0800 | [diff] [blame] | 1097 | int type = cb->compress_type; |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1098 | |
| 1099 | workspace = find_workspace(type); |
Anand Jain | e1ddce7 | 2017-05-26 15:44:59 +0800 | [diff] [blame] | 1100 | ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb); |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1101 | free_workspace(type, workspace); |
Anand Jain | e1ddce7 | 2017-05-26 15:44:59 +0800 | [diff] [blame] | 1102 | |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1103 | return ret; |
| 1104 | } |
| 1105 | |
| 1106 | /* |
| 1107 | * a less complex decompression routine. Our compressed data fits in a |
| 1108 | * single page, and we want to read a single page out of it. |
| 1109 | * start_byte tells us the offset into the decompressed data we're interested in |
| 1110 | */ |
| 1111 | int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, |
| 1112 | unsigned long start_byte, size_t srclen, size_t destlen) |
| 1113 | { |
| 1114 | struct list_head *workspace; |
| 1115 | int ret; |
| 1116 | |
| 1117 | workspace = find_workspace(type); |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1118 | |
| 1119 | ret = btrfs_compress_op[type-1]->decompress(workspace, data_in, |
| 1120 | dest_page, start_byte, |
| 1121 | srclen, destlen); |
| 1122 | |
| 1123 | free_workspace(type, workspace); |
| 1124 | return ret; |
| 1125 | } |
| 1126 | |
Alexey Charkov | 8e4eef7 | 2011-02-02 21:15:35 +0000 | [diff] [blame] | 1127 | void btrfs_exit_compress(void) |
Li Zefan | 261507a0 | 2010-12-17 14:21:50 +0800 | [diff] [blame] | 1128 | { |
| 1129 | free_workspaces(); |
| 1130 | } |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1131 | |
| 1132 | /* |
| 1133 | * Copy uncompressed data from working buffer to pages. |
| 1134 | * |
| 1135 | * buf_start is the byte offset of our working buffer within the decompressed data. |
| 1136 | * |
| 1137 | * total_out is the total number of decompressed bytes so far, i.e. the end of the working buffer |
| 1138 | */ |
David Sterba | 14a3357 | 2017-02-14 17:58:04 +0100 | [diff] [blame] | 1139 | int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start, |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1140 | unsigned long total_out, u64 disk_start, |
Christoph Hellwig | 974b1ad | 2016-11-25 09:07:46 +0100 | [diff] [blame] | 1141 | struct bio *bio) |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1142 | { |
| 1143 | unsigned long buf_offset; |
| 1144 | unsigned long current_buf_start; |
| 1145 | unsigned long start_byte; |
Omar Sandoval | 6e78b3f | 2017-02-10 15:03:35 -0800 | [diff] [blame] | 1146 | unsigned long prev_start_byte; |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1147 | unsigned long working_bytes = total_out - buf_start; |
| 1148 | unsigned long bytes; |
| 1149 | char *kaddr; |
Christoph Hellwig | 974b1ad | 2016-11-25 09:07:46 +0100 | [diff] [blame] | 1150 | struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter); |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1151 | |
| 1152 | /* |
| 1153 | * start byte is the first byte of the page we're currently |
| 1154 | * copying into, relative to the start of the uncompressed data in this extent. |
| 1155 | */ |
Christoph Hellwig | 974b1ad | 2016-11-25 09:07:46 +0100 | [diff] [blame] | 1156 | start_byte = page_offset(bvec.bv_page) - disk_start; |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1157 | |
| 1158 | /* we haven't yet hit data corresponding to this page */ |
| 1159 | if (total_out <= start_byte) |
| 1160 | return 1; |
| 1161 | |
| 1162 | /* |
| 1163 | * the start of the data we care about is offset into |
| 1164 | * the middle of our working buffer |
| 1165 | */ |
| 1166 | if (total_out > start_byte && buf_start < start_byte) { |
| 1167 | buf_offset = start_byte - buf_start; |
| 1168 | working_bytes -= buf_offset; |
| 1169 | } else { |
| 1170 | buf_offset = 0; |
| 1171 | } |
| 1172 | current_buf_start = buf_start; |
| 1173 | |
| 1174 | /* copy bytes from the working buffer into the pages */ |
| 1175 | while (working_bytes > 0) { |
Christoph Hellwig | 974b1ad | 2016-11-25 09:07:46 +0100 | [diff] [blame] | 1176 | bytes = min_t(unsigned long, bvec.bv_len, |
| 1177 | PAGE_SIZE - buf_offset); |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1178 | bytes = min(bytes, working_bytes); |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1179 | |
Christoph Hellwig | 974b1ad | 2016-11-25 09:07:46 +0100 | [diff] [blame] | 1180 | kaddr = kmap_atomic(bvec.bv_page); |
| 1181 | memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes); |
| 1182 | kunmap_atomic(kaddr); |
| 1183 | flush_dcache_page(bvec.bv_page); |
| 1184 | |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1185 | buf_offset += bytes; |
| 1186 | working_bytes -= bytes; |
| 1187 | current_buf_start += bytes; |
| 1188 | |
| 1189 | /* check if we need to pick another page */ |
Christoph Hellwig | 974b1ad | 2016-11-25 09:07:46 +0100 | [diff] [blame] | 1190 | bio_advance(bio, bytes); |
| 1191 | if (!bio->bi_iter.bi_size) |
| 1192 | return 0; |
| 1193 | bvec = bio_iter_iovec(bio, bio->bi_iter); |
Omar Sandoval | 6e78b3f | 2017-02-10 15:03:35 -0800 | [diff] [blame] | 1194 | prev_start_byte = start_byte; |
Christoph Hellwig | 974b1ad | 2016-11-25 09:07:46 +0100 | [diff] [blame] | 1195 | start_byte = page_offset(bvec.bv_page) - disk_start; |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1196 | |
Christoph Hellwig | 974b1ad | 2016-11-25 09:07:46 +0100 | [diff] [blame] | 1197 | /* |
Omar Sandoval | 6e78b3f | 2017-02-10 15:03:35 -0800 | [diff] [blame] | 1198 | * We need to make sure we're only adjusting |
| 1199 | * our offset into the compression working buffer when |
| 1200 | * we're switching pages. Otherwise we can incorrectly |
| 1201 | * keep copying when we were actually done. |
Christoph Hellwig | 974b1ad | 2016-11-25 09:07:46 +0100 | [diff] [blame] | 1202 | */ |
Omar Sandoval | 6e78b3f | 2017-02-10 15:03:35 -0800 | [diff] [blame] | 1203 | if (start_byte != prev_start_byte) { |
| 1204 | /* |
| 1205 | * make sure our new page is covered by this |
| 1206 | * working buffer |
| 1207 | */ |
| 1208 | if (total_out <= start_byte) |
| 1209 | return 1; |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1210 | |
Omar Sandoval | 6e78b3f | 2017-02-10 15:03:35 -0800 | [diff] [blame] | 1211 | /* |
| 1212 | * the next page in the biovec might not be adjacent |
| 1213 | * to the last page, but it might still be found |
| 1214 | * inside this working buffer. bump our offset pointer |
| 1215 | */ |
| 1216 | if (total_out > start_byte && |
| 1217 | current_buf_start < start_byte) { |
| 1218 | buf_offset = start_byte - buf_start; |
| 1219 | working_bytes = total_out - start_byte; |
| 1220 | current_buf_start = buf_start + buf_offset; |
| 1221 | } |
Li Zefan | 3a39c18 | 2010-11-08 15:22:19 +0800 | [diff] [blame] | 1222 | } |
| 1223 | } |
| 1224 | |
| 1225 | return 1; |
| 1226 | } |
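A worked example of the offsets above, with illustrative numbers: suppose the compressed extent starts at file offset disk_start = 1 MiB and the decompressor has produced uncompressed bytes [4096, 8192) so far, i.e. buf_start = 4096 and total_out = 8192. If the current bio page maps file offset 1 MiB + 4096, then start_byte = 4096, buf_offset = 0 and up to 4096 bytes are copied into that page before the bio advances; a page whose start_byte is at or beyond total_out makes the function return 1 instead, telling the caller to decompress more data first.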
Timofey Titovets | c2fcdcd | 2017-07-17 16:52:58 +0300 | [diff] [blame] | 1227 | |
Timofey Titovets | 1956243 | 2017-10-08 16:11:59 +0300 | [diff] [blame] | 1228 | /* |
| 1229 | * Shannon Entropy calculation |
| 1230 | * |
| 1231 | * Pure byte distribution analysis fails to determine compressibility of data. |
| 1232 | * Try calculating entropy to estimate the average minimum number of bits |
| 1233 | * needed to encode the sampled data. |
| 1234 | * |
| 1235 | * For convenience, return the percentage of needed bits instead of the amount of |
| 1236 | * bits directly. |
| 1237 | * |
| 1238 | * @ENTROPY_LVL_ACEPTABLE - below that threshold the sample has low byte entropy |
| 1239 | * and is compressible with high probability |
| 1240 | * |
| 1241 | * @ENTROPY_LVL_HIGH - the data is not compressible with high probability |
| 1242 | * |
| 1243 | * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate. |
| 1244 | */ |
| 1245 | #define ENTROPY_LVL_ACEPTABLE (65) |
| 1246 | #define ENTROPY_LVL_HIGH (80) |
| 1247 | |
| 1248 | /* |
| 1249 | * For increased precision in the shannon_entropy calculation, |
| 1250 | * compute pow(n, M) to preserve more digits after the decimal point: |
| 1251 | * |
| 1252 | * - maximum int bit length is 64 |
| 1253 | * - ilog2(MAX_SAMPLE_SIZE) -> 13 |
| 1254 | * - 13 * 4 = 52 < 64 -> M = 4 |
| 1255 | * |
| 1256 | * So use pow(n, 4). |
| 1257 | */ |
| 1258 | static inline u32 ilog2_w(u64 n) |
| 1259 | { |
| 1260 | return ilog2(n * n * n * n); |
| 1261 | } |
| 1262 | |
| 1263 | static u32 shannon_entropy(struct heuristic_ws *ws) |
| 1264 | { |
| 1265 | const u32 entropy_max = 8 * ilog2_w(2); |
| 1266 | u32 entropy_sum = 0; |
| 1267 | u32 p, p_base, sz_base; |
| 1268 | u32 i; |
| 1269 | |
| 1270 | sz_base = ilog2_w(ws->sample_size); |
| 1271 | for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) { |
| 1272 | p = ws->bucket[i].count; |
| 1273 | p_base = ilog2_w(p); |
| 1274 | entropy_sum += p * (sz_base - p_base); |
| 1275 | } |
| 1276 | |
| 1277 | entropy_sum /= ws->sample_size; |
| 1278 | return entropy_sum * 100 / entropy_max; |
| 1279 | } |
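A quick worked check of the fixed-point arithmetic above, with made-up numbers: for an 8 KiB sample containing exactly two byte values at 4096 occurrences each, sz_base = ilog2(8192^4) = 52 and p_base = ilog2(4096^4) = 48, so entropy_sum = 2 * 4096 * (52 - 48) = 32768; dividing by the sample size gives 4, and 4 * 100 / entropy_max (= 8 * ilog2_w(2) = 32) yields 12, close to the exact Shannon value of 12.5% (1 bit needed out of 8) and well below ENTROPY_LVL_ACEPTABLE.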
| 1280 | |
Timofey Titovets | 858177d | 2017-09-28 17:33:41 +0300 | [diff] [blame] | 1281 | /* Compare buckets by count, reversed so that sort() orders them descending */ |
| 1282 | static int bucket_comp_rev(const void *lv, const void *rv) |
| 1283 | { |
| 1284 | const struct bucket_item *l = (const struct bucket_item *)lv; |
| 1285 | const struct bucket_item *r = (const struct bucket_item *)rv; |
| 1286 | |
| 1287 | return r->count - l->count; |
| 1288 | } |
| 1289 | |
| 1290 | /* |
| 1291 | * Size of the core byte set - how many bytes cover 90% of the sample |
| 1292 | * |
| 1293 | * There are several types of structured binary data that use nearly all byte |
| 1294 | * values. The distribution can be uniform and counts in all buckets will be |
| 1295 | * nearly the same (e.g. encrypted data). Unlikely to be compressible. |
| 1296 | * |
| 1297 | * Another possibility is a normal (Gaussian) distribution, where the data could |
| 1298 | * be potentially compressible, but we have to take a few more steps to decide |
| 1299 | * how much. |
| 1300 | * |
| 1301 | * @BYTE_CORE_SET_LOW - the main part of the byte values repeats frequently, |
| 1302 | * a compression algorithm can easily exploit that |
| 1303 | * @BYTE_CORE_SET_HIGH - the data has a uniform distribution and with high |
| 1304 | * probability is not compressible |
| 1305 | */ |
| 1306 | #define BYTE_CORE_SET_LOW (64) |
| 1307 | #define BYTE_CORE_SET_HIGH (200) |
| 1308 | |
| 1309 | static int byte_core_set_size(struct heuristic_ws *ws) |
| 1310 | { |
| 1311 | u32 i; |
| 1312 | u32 coreset_sum = 0; |
| 1313 | const u32 core_set_threshold = ws->sample_size * 90 / 100; |
| 1314 | struct bucket_item *bucket = ws->bucket; |
| 1315 | |
| 1316 | /* Sort in reverse order */ |
| 1317 | sort(bucket, BUCKET_SIZE, sizeof(*bucket), &bucket_comp_rev, NULL); |
| 1318 | |
| 1319 | for (i = 0; i < BYTE_CORE_SET_LOW; i++) |
| 1320 | coreset_sum += bucket[i].count; |
| 1321 | |
| 1322 | if (coreset_sum > core_set_threshold) |
| 1323 | return i; |
| 1324 | |
| 1325 | for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) { |
| 1326 | coreset_sum += bucket[i].count; |
| 1327 | if (coreset_sum > core_set_threshold) |
| 1328 | break; |
| 1329 | } |
| 1330 | |
| 1331 | return i; |
| 1332 | } |
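For illustration: with an 8 KiB sample the 90% threshold is 7372 bytes. If the 64 most frequent byte values already cover more than that, the function returns BYTE_CORE_SET_LOW and the heuristic treats the data as compressible; a perfectly flat distribution (all 256 values appearing 32 times each) never crosses the threshold before the second loop stops, so the function returns BYTE_CORE_SET_HIGH and compression is skipped.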
| 1333 | |
Timofey Titovets | a288e92 | 2017-09-28 17:33:40 +0300 | [diff] [blame] | 1334 | /* |
| 1335 | * Count byte values in buckets. |
| 1336 | * This heuristic can detect textual data (configs, xml, json, html, etc). |
| 1337 | * In most text-like data the byte set is restricted to a limited number of |
| 1338 | * possible characters, and that restriction in most cases makes the data easy to |
| 1339 | * compress. |
| 1340 | * |
| 1341 | * @BYTE_SET_THRESHOLD - consider all data within this byte set size: |
| 1342 | * less - compressible |
| 1343 | * more - need additional analysis |
| 1344 | */ |
| 1345 | #define BYTE_SET_THRESHOLD (64) |
| 1346 | |
| 1347 | static u32 byte_set_size(const struct heuristic_ws *ws) |
| 1348 | { |
| 1349 | u32 i; |
| 1350 | u32 byte_set_size = 0; |
| 1351 | |
| 1352 | for (i = 0; i < BYTE_SET_THRESHOLD; i++) { |
| 1353 | if (ws->bucket[i].count > 0) |
| 1354 | byte_set_size++; |
| 1355 | } |
| 1356 | |
| 1357 | /* |
| 1358 | * Continue collecting count of byte values in buckets. If the byte |
| 1359 | * set size is bigger than the threshold, it's pointless to continue, |
| 1360 | * the detection technique would fail for this type of data. |
| 1361 | */ |
| 1362 | for (; i < BUCKET_SIZE; i++) { |
| 1363 | if (ws->bucket[i].count > 0) { |
| 1364 | byte_set_size++; |
| 1365 | if (byte_set_size > BYTE_SET_THRESHOLD) |
| 1366 | return byte_set_size; |
| 1367 | } |
| 1368 | } |
| 1369 | |
| 1370 | return byte_set_size; |
| 1371 | } |
| 1372 | |
Timofey Titovets | 1fe4f6f | 2017-09-28 17:33:39 +0300 | [diff] [blame] | 1373 | static bool sample_repeated_patterns(struct heuristic_ws *ws) |
| 1374 | { |
| 1375 | const u32 half_of_sample = ws->sample_size / 2; |
| 1376 | const u8 *data = ws->sample; |
| 1377 | |
| 1378 | return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0; |
| 1379 | } |
| 1380 | |
Timofey Titovets | a440d48 | 2017-09-28 17:33:38 +0300 | [diff] [blame] | 1381 | static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end, |
| 1382 | struct heuristic_ws *ws) |
| 1383 | { |
| 1384 | struct page *page; |
| 1385 | u64 index, index_end; |
| 1386 | u32 i, curr_sample_pos; |
| 1387 | u8 *in_data; |
| 1388 | |
| 1389 | /* |
| 1390 | * Compression handles the input data in chunks of 128KiB |
| 1391 | * (defined by BTRFS_MAX_UNCOMPRESSED) |
| 1392 | * |
| 1393 | * We do the same for the heuristic and loop over the whole range. |
| 1394 | * |
| 1395 | * MAX_SAMPLE_SIZE - calculated under the assumption that the heuristic will |
| 1396 | * process no more than BTRFS_MAX_UNCOMPRESSED at a time. |
| 1397 | */ |
| 1398 | if (end - start > BTRFS_MAX_UNCOMPRESSED) |
| 1399 | end = start + BTRFS_MAX_UNCOMPRESSED; |
| 1400 | |
| 1401 | index = start >> PAGE_SHIFT; |
| 1402 | index_end = end >> PAGE_SHIFT; |
| 1403 | |
| 1404 | /* Don't miss unaligned end */ |
| 1405 | if (!IS_ALIGNED(end, PAGE_SIZE)) |
| 1406 | index_end++; |
| 1407 | |
| 1408 | curr_sample_pos = 0; |
| 1409 | while (index < index_end) { |
| 1410 | page = find_get_page(inode->i_mapping, index); |
| 1411 | in_data = kmap(page); |
| 1412 | /* Handle case where the start is not aligned to PAGE_SIZE */ |
| 1413 | i = start % PAGE_SIZE; |
| 1414 | while (i < PAGE_SIZE - SAMPLING_READ_SIZE) { |
| 1415 | /* Don't sample any garbage from the last page */ |
| 1416 | if (start > end - SAMPLING_READ_SIZE) |
| 1417 | break; |
| 1418 | memcpy(&ws->sample[curr_sample_pos], &in_data[i], |
| 1419 | SAMPLING_READ_SIZE); |
| 1420 | i += SAMPLING_INTERVAL; |
| 1421 | start += SAMPLING_INTERVAL; |
| 1422 | curr_sample_pos += SAMPLING_READ_SIZE; |
| 1423 | } |
| 1424 | kunmap(page); |
| 1425 | put_page(page); |
| 1426 | |
| 1427 | index++; |
| 1428 | } |
| 1429 | |
| 1430 | ws->sample_size = curr_sample_pos; |
| 1431 | } |
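As a sanity check on the sampling arithmetic (using the SAMPLING_READ_SIZE and SAMPLING_INTERVAL constants defined earlier in this file, 16 and 256 bytes at the time of writing): a full 128 KiB range yields 128 KiB / 256 = 512 reads of 16 bytes each, i.e. an 8 KiB sample, which is what MAX_SAMPLE_SIZE accounts for.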
| 1432 | |
Timofey Titovets | c2fcdcd | 2017-07-17 16:52:58 +0300 | [diff] [blame] | 1433 | /* |
| 1434 | * Compression heuristic. |
| 1435 | * |
| 1436 | * For now it's a naive and optimistic 'return true'; we'll extend the logic to |
| 1437 | * quickly (compared to direct compression) detect data characteristics |
| 1438 | * (compressible/incompressible) to avoid wasting CPU time on incompressible |
| 1439 | * data. |
| 1440 | * |
| 1441 | * The following types of analysis can be performed: |
| 1442 | * - detect mostly zero data |
| 1443 | * - detect data with low "byte set" size (text, etc) |
| 1444 | * - detect data with low/high "core byte" set |
| 1445 | * |
| 1446 | * Return non-zero if the compression should be done, 0 otherwise. |
| 1447 | */ |
| 1448 | int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end) |
| 1449 | { |
Timofey Titovets | 4e439a0 | 2017-09-28 17:33:36 +0300 | [diff] [blame] | 1450 | struct list_head *ws_list = __find_workspace(0, true); |
| 1451 | struct heuristic_ws *ws; |
Timofey Titovets | a440d48 | 2017-09-28 17:33:38 +0300 | [diff] [blame] | 1452 | u32 i; |
| 1453 | u8 byte; |
Timofey Titovets | 1956243 | 2017-10-08 16:11:59 +0300 | [diff] [blame] | 1454 | int ret = 0; |
Timofey Titovets | c2fcdcd | 2017-07-17 16:52:58 +0300 | [diff] [blame] | 1455 | |
Timofey Titovets | 4e439a0 | 2017-09-28 17:33:36 +0300 | [diff] [blame] | 1456 | ws = list_entry(ws_list, struct heuristic_ws, list); |
| 1457 | |
Timofey Titovets | a440d48 | 2017-09-28 17:33:38 +0300 | [diff] [blame] | 1458 | heuristic_collect_sample(inode, start, end, ws); |
| 1459 | |
Timofey Titovets | 1fe4f6f | 2017-09-28 17:33:39 +0300 | [diff] [blame] | 1460 | if (sample_repeated_patterns(ws)) { |
| 1461 | ret = 1; |
| 1462 | goto out; |
| 1463 | } |
| 1464 | |
Timofey Titovets | a440d48 | 2017-09-28 17:33:38 +0300 | [diff] [blame] | 1465 | memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE); |
| 1466 | |
| 1467 | for (i = 0; i < ws->sample_size; i++) { |
| 1468 | byte = ws->sample[i]; |
| 1469 | ws->bucket[byte].count++; |
Timofey Titovets | c2fcdcd | 2017-07-17 16:52:58 +0300 | [diff] [blame] | 1470 | } |
| 1471 | |
Timofey Titovets | a288e92 | 2017-09-28 17:33:40 +0300 | [diff] [blame] | 1472 | i = byte_set_size(ws); |
| 1473 | if (i < BYTE_SET_THRESHOLD) { |
| 1474 | ret = 2; |
| 1475 | goto out; |
| 1476 | } |
| 1477 | |
Timofey Titovets | 858177d | 2017-09-28 17:33:41 +0300 | [diff] [blame] | 1478 | i = byte_core_set_size(ws); |
| 1479 | if (i <= BYTE_CORE_SET_LOW) { |
| 1480 | ret = 3; |
| 1481 | goto out; |
| 1482 | } |
| 1483 | |
| 1484 | if (i >= BYTE_CORE_SET_HIGH) { |
| 1485 | ret = 0; |
| 1486 | goto out; |
| 1487 | } |
| 1488 | |
Timofey Titovets | 1956243 | 2017-10-08 16:11:59 +0300 | [diff] [blame] | 1489 | i = shannon_entropy(ws); |
| 1490 | if (i <= ENTROPY_LVL_ACEPTABLE) { |
| 1491 | ret = 4; |
| 1492 | goto out; |
| 1493 | } |
| 1494 | |
| 1495 | /* |
| 1496 | * For the levels below ENTROPY_LVL_HIGH, additional analysis would be |
| 1497 | * needed to give a green light to compression. |
| 1498 | * |
| 1499 | * For now just assume that compression at that level is not worth the |
| 1500 | * resources because: |
| 1501 | * |
| 1502 | * 1. it is possible to defrag the data later |
| 1503 | * |
| 1504 | * 2. the data would turn out to be hardly compressible, e.g. 150 distinct byte |
| 1505 | * values, each bucket with a count around 54. The heuristic would |
| 1506 | * be confused. This can happen when the data has some internal repeated |
| 1507 | * patterns like "abbacbbc...". This can be detected by analyzing |
| 1508 | * pairs of bytes, which is too costly. |
| 1509 | */ |
| 1510 | if (i < ENTROPY_LVL_HIGH) { |
| 1511 | ret = 5; |
| 1512 | goto out; |
| 1513 | } else { |
| 1514 | ret = 0; |
| 1515 | goto out; |
| 1516 | } |
| 1517 | |
Timofey Titovets | 1fe4f6f | 2017-09-28 17:33:39 +0300 | [diff] [blame] | 1518 | out: |
Timofey Titovets | 4e439a0 | 2017-09-28 17:33:36 +0300 | [diff] [blame] | 1519 | __free_workspace(0, ws_list, true); |
Timofey Titovets | c2fcdcd | 2017-07-17 16:52:58 +0300 | [diff] [blame] | 1520 | return ret; |
| 1521 | } |
David Sterba | f51d2b5 | 2017-09-15 17:36:57 +0200 | [diff] [blame] | 1522 | |
| 1523 | unsigned int btrfs_compress_str2level(const char *str) |
| 1524 | { |
| 1525 | if (strncmp(str, "zlib", 4) != 0) |
| 1526 | return 0; |
| 1527 | |
Adam Borowski | fa4d885 | 2017-09-15 17:36:58 +0200 | [diff] [blame] | 1528 | /* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */ |
| 1529 | if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0) |
| 1530 | return str[5] - '0'; |
David Sterba | f51d2b5 | 2017-09-15 17:36:57 +0200 | [diff] [blame] | 1531 | |
Qu Wenruo | eae8d82 | 2017-11-06 10:43:18 +0800 | [diff] [blame] | 1532 | return BTRFS_ZLIB_DEFAULT_LEVEL; |
David Sterba | f51d2b5 | 2017-09-15 17:36:57 +0200 | [diff] [blame] | 1533 | } |
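A hedged illustration of the accepted forms (BTRFS_ZLIB_DEFAULT_LEVEL is the fallback defined in compression.h, 3 at the time of writing):

/*
 * btrfs_compress_str2level("zstd")    -> 0  (not a zlib option string)
 * btrfs_compress_str2level("zlib")    -> BTRFS_ZLIB_DEFAULT_LEVEL
 * btrfs_compress_str2level("zlib:7")  -> 7
 * btrfs_compress_str2level("zlib:42") -> BTRFS_ZLIB_DEFAULT_LEVEL (trailing digit)
 */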