David Sterba | c1d7c51 | 2018-04-03 19:23:33 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 2 | /* |
| 3 | * Copyright (c) 2016-present, Facebook, Inc. |
| 4 | * All rights reserved. |
| 5 | * |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 6 | */ |
David Sterba | c1d7c51 | 2018-04-03 19:23:33 +0200 | [diff] [blame] | 7 | |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 8 | #include <linux/bio.h> |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 9 | #include <linux/bitmap.h> |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 10 | #include <linux/err.h> |
| 11 | #include <linux/init.h> |
| 12 | #include <linux/kernel.h> |
| 13 | #include <linux/mm.h> |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 14 | #include <linux/sched/mm.h> |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 15 | #include <linux/pagemap.h> |
| 16 | #include <linux/refcount.h> |
| 17 | #include <linux/sched.h> |
| 18 | #include <linux/slab.h> |
| 19 | #include <linux/zstd.h> |
David Sterba | 602cbe9 | 2019-08-21 18:48:25 +0200 | [diff] [blame] | 20 | #include "misc.h" |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 21 | #include "compression.h" |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 22 | #include "ctree.h" |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 23 | |
| 24 | #define ZSTD_BTRFS_MAX_WINDOWLOG 17 |
| 25 | #define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG) |
| 26 | #define ZSTD_BTRFS_DEFAULT_LEVEL 3 |
Dennis Zhou | d3c6ab7 | 2019-02-04 15:20:07 -0500 | [diff] [blame] | 27 | #define ZSTD_BTRFS_MAX_LEVEL 15 |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 28 | /* 307s to avoid pathologically clashing with transaction commit */ |
| 29 | #define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ) |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 30 | |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 31 | static zstd_parameters zstd_get_btrfs_parameters(unsigned int level, |
Dennis Zhou | e0dc87a | 2019-02-04 15:20:06 -0500 | [diff] [blame] | 32 | size_t src_len) |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 33 | { |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 34 | zstd_parameters params = zstd_get_params(level, src_len); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 35 | |
| 36 | if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG) |
| 37 | params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG; |
| 38 | WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT); |
| 39 | return params; |
| 40 | } |
| 41 | |
/*
 * A single zstd workspace, usable for either compression or decompression.
 *
 * @mem holds the zstd stream workspace sized for @level; @buf is a
 * page-sized bounce buffer used as the decompression output staging area.
 */
struct workspace {
	void *mem;		/* zstd cstream/dstream workspace memory */
	size_t size;		/* size of @mem in bytes */
	char *buf;		/* page-sized intermediate output buffer */
	unsigned int level;	/* level this workspace was sized for */
	unsigned int req_level;	/* level of the request using it, 0 when idle */
	unsigned long last_used; /* jiffies */
	struct list_head list;		/* node on wsm.idle_ws[level - 1] */
	struct list_head lru_list;	/* node on wsm.lru_list for reclaim */
	zstd_in_buffer in_buf;
	zstd_out_buffer out_buf;
};
| 54 | |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 55 | /* |
| 56 | * Zstd Workspace Management |
| 57 | * |
| 58 | * Zstd workspaces have different memory requirements depending on the level. |
| 59 | * The zstd workspaces are managed by having individual lists for each level |
| 60 | * and a global lru. Forward progress is maintained by protecting a max level |
| 61 | * workspace. |
| 62 | * |
 * Getting a workspace is done by scanning the bitmap upward from the
 * requested level for a level with an available workspace. This lets us
 * recycle higher level workspaces because of the monotonic memory
 * guarantee. A workspace's
| 66 | * last_used is only updated if it is being used by the corresponding memory |
| 67 | * level. Putting a workspace involves adding it back to the appropriate places |
| 68 | * and adding it back to the lru if necessary. |
| 69 | * |
| 70 | * A timer is used to reclaim workspaces if they have not been used for |
| 71 | * ZSTD_BTRFS_RECLAIM_JIFFIES. This helps keep only active workspaces around. |
| 72 | * The upper bound is provided by the workqueue limit which is 2 (percpu limit). |
| 73 | */ |
| 74 | |
/*
 * Global manager for all zstd workspaces.
 *
 * Idle workspaces sit on per-level lists; @active_map has a bit set for
 * every level whose idle list is non-empty.  @lru_list orders idle
 * workspaces by last use so the reclaim timer can free stale ones.
 */
struct zstd_workspace_manager {
	const struct btrfs_compress_op *ops;
	spinlock_t lock;	/* protects the lists, bitmap, and timer arming */
	struct list_head lru_list;	/* idle workspaces, most recent first */
	struct list_head idle_ws[ZSTD_BTRFS_MAX_LEVEL];	/* one list per level */
	unsigned long active_map;	/* bit n set => idle_ws[n] is non-empty */
	wait_queue_head_t wait;	/* waiters for a workspace under memory pressure */
	struct timer_list timer;	/* drives zstd_reclaim_timer_fn() */
};
| 84 | |
| 85 | static struct zstd_workspace_manager wsm; |
Dennis Zhou | 92ee5530 | 2019-02-04 15:20:03 -0500 | [diff] [blame] | 86 | |
Dennis Zhou | d3c6ab7 | 2019-02-04 15:20:07 -0500 | [diff] [blame] | 87 | static size_t zstd_ws_mem_sizes[ZSTD_BTRFS_MAX_LEVEL]; |
| 88 | |
/* Convert an idle-list node back to its containing workspace. */
static inline struct workspace *list_to_workspace(struct list_head *list)
{
	return container_of(list, struct workspace, list);
}
| 93 | |
David Sterba | d20f395 | 2019-10-04 02:21:48 +0200 | [diff] [blame] | 94 | void zstd_free_workspace(struct list_head *ws); |
| 95 | struct list_head *zstd_alloc_workspace(unsigned int level); |
/*
 * zstd_reclaim_timer_fn - reclaim timer
 * @timer: timer that fired (unused; all state lives in the global wsm)
 *
 * This scans the lru_list and attempts to reclaim any workspace that hasn't
 * been used for ZSTD_BTRFS_RECLAIM_JIFFIES.
 */
static void zstd_reclaim_timer_fn(struct timer_list *timer)
{
	unsigned long reclaim_threshold = jiffies - ZSTD_BTRFS_RECLAIM_JIFFIES;
	struct list_head *pos, *next;

	spin_lock_bh(&wsm.lock);

	if (list_empty(&wsm.lru_list)) {
		spin_unlock_bh(&wsm.lock);
		return;
	}

	/* Walk oldest-first; the list is ordered most-recently-used first. */
	list_for_each_prev_safe(pos, next, &wsm.lru_list) {
		struct workspace *victim = container_of(pos, struct workspace,
							lru_list);
		unsigned int level;

		/* Everything further forward is newer; stop here. */
		if (time_after(victim->last_used, reclaim_threshold))
			break;

		/* workspace is in use */
		if (victim->req_level)
			continue;

		level = victim->level;
		list_del(&victim->lru_list);
		list_del(&victim->list);
		zstd_free_workspace(&victim->list);

		/* That may have been the last idle workspace at this level. */
		if (list_empty(&wsm.idle_ws[level - 1]))
			clear_bit(level - 1, &wsm.active_map);

	}

	/* Re-arm only while there is still something left to reclaim. */
	if (!list_empty(&wsm.lru_list))
		mod_timer(&wsm.timer, jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);

	spin_unlock_bh(&wsm.lock);
}
| 142 | |
Dennis Zhou | d3c6ab7 | 2019-02-04 15:20:07 -0500 | [diff] [blame] | 143 | /* |
| 144 | * zstd_calc_ws_mem_sizes - calculate monotonic memory bounds |
| 145 | * |
| 146 | * It is possible based on the level configurations that a higher level |
| 147 | * workspace uses less memory than a lower level workspace. In order to reuse |
| 148 | * workspaces, this must be made a monotonic relationship. This precomputes |
| 149 | * the required memory for each level and enforces the monotonicity between |
| 150 | * level and memory required. |
| 151 | */ |
| 152 | static void zstd_calc_ws_mem_sizes(void) |
| 153 | { |
| 154 | size_t max_size = 0; |
| 155 | unsigned int level; |
| 156 | |
| 157 | for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) { |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 158 | zstd_parameters params = |
Dennis Zhou | d3c6ab7 | 2019-02-04 15:20:07 -0500 | [diff] [blame] | 159 | zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT); |
| 160 | size_t level_size = |
| 161 | max_t(size_t, |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 162 | zstd_cstream_workspace_bound(¶ms.cParams), |
| 163 | zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT)); |
Dennis Zhou | d3c6ab7 | 2019-02-04 15:20:07 -0500 | [diff] [blame] | 164 | |
| 165 | max_size = max_t(size_t, max_size, level_size); |
| 166 | zstd_ws_mem_sizes[level - 1] = max_size; |
| 167 | } |
| 168 | } |
| 169 | |
David Sterba | d551703 | 2019-10-02 01:08:03 +0200 | [diff] [blame] | 170 | void zstd_init_workspace_manager(void) |
Dennis Zhou | 92ee5530 | 2019-02-04 15:20:03 -0500 | [diff] [blame] | 171 | { |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 172 | struct list_head *ws; |
| 173 | int i; |
| 174 | |
Dennis Zhou | d3c6ab7 | 2019-02-04 15:20:07 -0500 | [diff] [blame] | 175 | zstd_calc_ws_mem_sizes(); |
| 176 | |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 177 | wsm.ops = &btrfs_zstd_compress; |
| 178 | spin_lock_init(&wsm.lock); |
| 179 | init_waitqueue_head(&wsm.wait); |
| 180 | timer_setup(&wsm.timer, zstd_reclaim_timer_fn, 0); |
| 181 | |
| 182 | INIT_LIST_HEAD(&wsm.lru_list); |
| 183 | for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) |
| 184 | INIT_LIST_HEAD(&wsm.idle_ws[i]); |
| 185 | |
Dennis Zhou | b242349 | 2019-02-27 16:21:28 -0500 | [diff] [blame] | 186 | ws = zstd_alloc_workspace(ZSTD_BTRFS_MAX_LEVEL); |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 187 | if (IS_ERR(ws)) { |
| 188 | pr_warn( |
| 189 | "BTRFS: cannot preallocate zstd compression workspace\n"); |
| 190 | } else { |
| 191 | set_bit(ZSTD_BTRFS_MAX_LEVEL - 1, &wsm.active_map); |
| 192 | list_add(ws, &wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1]); |
| 193 | } |
Dennis Zhou | 92ee5530 | 2019-02-04 15:20:03 -0500 | [diff] [blame] | 194 | } |
| 195 | |
David Sterba | 2510307 | 2019-10-02 01:08:03 +0200 | [diff] [blame] | 196 | void zstd_cleanup_workspace_manager(void) |
Dennis Zhou | 92ee5530 | 2019-02-04 15:20:03 -0500 | [diff] [blame] | 197 | { |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 198 | struct workspace *workspace; |
| 199 | int i; |
| 200 | |
Dennis Zhou | fee13fe | 2019-05-17 19:16:26 -0400 | [diff] [blame] | 201 | spin_lock_bh(&wsm.lock); |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 202 | for (i = 0; i < ZSTD_BTRFS_MAX_LEVEL; i++) { |
| 203 | while (!list_empty(&wsm.idle_ws[i])) { |
| 204 | workspace = container_of(wsm.idle_ws[i].next, |
| 205 | struct workspace, list); |
| 206 | list_del(&workspace->list); |
| 207 | list_del(&workspace->lru_list); |
Dennis Zhou | b242349 | 2019-02-27 16:21:28 -0500 | [diff] [blame] | 208 | zstd_free_workspace(&workspace->list); |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 209 | } |
| 210 | } |
Dennis Zhou | fee13fe | 2019-05-17 19:16:26 -0400 | [diff] [blame] | 211 | spin_unlock_bh(&wsm.lock); |
Dennis Zhou | d386515 | 2019-02-22 14:53:48 -0500 | [diff] [blame] | 212 | |
| 213 | del_timer_sync(&wsm.timer); |
Dennis Zhou | 92ee5530 | 2019-02-04 15:20:03 -0500 | [diff] [blame] | 214 | } |
| 215 | |
/*
 * zstd_find_workspace - find workspace
 * @level: compression level
 *
 * This iterates over the set bits in the active_map beginning at the requested
 * compression level. This lets us utilize already allocated workspaces before
 * allocating a new one. If the workspace is of a larger size, it is used, but
 * the place in the lru_list and last_used times are not updated. This is to
 * offer the opportunity to reclaim the workspace in favor of allocating an
 * appropriately sized one in the future.
 *
 * Returns the workspace's list node, or NULL if no idle workspace of at
 * least @level is available.
 */
static struct list_head *zstd_find_workspace(unsigned int level)
{
	struct list_head *ws;
	struct workspace *workspace;
	int i = level - 1;

	spin_lock_bh(&wsm.lock);
	for_each_set_bit_from(i, &wsm.active_map, ZSTD_BTRFS_MAX_LEVEL) {
		if (!list_empty(&wsm.idle_ws[i])) {
			ws = wsm.idle_ws[i].next;
			workspace = list_to_workspace(ws);
			list_del_init(ws);
			/* keep its place if it's a lower level using this */
			workspace->req_level = level;
			if (level == workspace->level)
				list_del(&workspace->lru_list);
			/* the last idle workspace at this level was just taken */
			if (list_empty(&wsm.idle_ws[i]))
				clear_bit(i, &wsm.active_map);
			spin_unlock_bh(&wsm.lock);
			return ws;
		}
	}
	spin_unlock_bh(&wsm.lock);

	return NULL;
}
| 253 | |
| 254 | /* |
| 255 | * zstd_get_workspace - zstd's get_workspace |
| 256 | * @level: compression level |
| 257 | * |
| 258 | * If @level is 0, then any compression level can be used. Therefore, we begin |
| 259 | * scanning from 1. We first scan through possible workspaces and then after |
| 260 | * attempt to allocate a new workspace. If we fail to allocate one due to |
| 261 | * memory pressure, go to sleep waiting for the max level workspace to free up. |
| 262 | */ |
David Sterba | d20f395 | 2019-10-04 02:21:48 +0200 | [diff] [blame] | 263 | struct list_head *zstd_get_workspace(unsigned int level) |
Dennis Zhou | 92ee5530 | 2019-02-04 15:20:03 -0500 | [diff] [blame] | 264 | { |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 265 | struct list_head *ws; |
| 266 | unsigned int nofs_flag; |
Dennis Zhou | e0dc87a | 2019-02-04 15:20:06 -0500 | [diff] [blame] | 267 | |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 268 | /* level == 0 means we can use any workspace */ |
| 269 | if (!level) |
| 270 | level = 1; |
| 271 | |
| 272 | again: |
| 273 | ws = zstd_find_workspace(level); |
| 274 | if (ws) |
| 275 | return ws; |
| 276 | |
| 277 | nofs_flag = memalloc_nofs_save(); |
Dennis Zhou | b242349 | 2019-02-27 16:21:28 -0500 | [diff] [blame] | 278 | ws = zstd_alloc_workspace(level); |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 279 | memalloc_nofs_restore(nofs_flag); |
| 280 | |
| 281 | if (IS_ERR(ws)) { |
| 282 | DEFINE_WAIT(wait); |
| 283 | |
| 284 | prepare_to_wait(&wsm.wait, &wait, TASK_UNINTERRUPTIBLE); |
| 285 | schedule(); |
| 286 | finish_wait(&wsm.wait, &wait); |
| 287 | |
| 288 | goto again; |
| 289 | } |
Dennis Zhou | e0dc87a | 2019-02-04 15:20:06 -0500 | [diff] [blame] | 290 | |
| 291 | return ws; |
Dennis Zhou | 92ee5530 | 2019-02-04 15:20:03 -0500 | [diff] [blame] | 292 | } |
| 293 | |
/*
 * zstd_put_workspace - zstd put_workspace
 * @ws: list_head for the workspace
 *
 * When putting back a workspace, we only need to update the LRU if we are of
 * the requested compression level. Here is where we continue to protect the
 * max level workspace or update last_used accordingly. If the reclaim timer
 * isn't set, it is also set here. Only the max level workspace tries and wakes
 * up waiting workspaces.
 */
void zstd_put_workspace(struct list_head *ws)
{
	struct workspace *workspace = list_to_workspace(ws);

	spin_lock_bh(&wsm.lock);

	/* A node is only taken off the lru if we are the corresponding level */
	if (workspace->req_level == workspace->level) {
		/* Hide a max level workspace from reclaim */
		if (list_empty(&wsm.idle_ws[ZSTD_BTRFS_MAX_LEVEL - 1])) {
			INIT_LIST_HEAD(&workspace->lru_list);
		} else {
			workspace->last_used = jiffies;
			list_add(&workspace->lru_list, &wsm.lru_list);
			/* Arm the reclaim timer on first LRU insertion */
			if (!timer_pending(&wsm.timer))
				mod_timer(&wsm.timer,
					  jiffies + ZSTD_BTRFS_RECLAIM_JIFFIES);
		}
	}

	/* Mark this level as having an idle workspace again */
	set_bit(workspace->level - 1, &wsm.active_map);
	list_add(&workspace->list, &wsm.idle_ws[workspace->level - 1]);
	workspace->req_level = 0;

	spin_unlock_bh(&wsm.lock);

	/* A returned max level workspace may unblock sleepers in get_workspace */
	if (workspace->level == ZSTD_BTRFS_MAX_LEVEL)
		cond_wake_up(&wsm.wait);
}
| 333 | |
David Sterba | d20f395 | 2019-10-04 02:21:48 +0200 | [diff] [blame] | 334 | void zstd_free_workspace(struct list_head *ws) |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 335 | { |
| 336 | struct workspace *workspace = list_entry(ws, struct workspace, list); |
| 337 | |
| 338 | kvfree(workspace->mem); |
| 339 | kfree(workspace->buf); |
| 340 | kfree(workspace); |
| 341 | } |
| 342 | |
David Sterba | d20f395 | 2019-10-04 02:21:48 +0200 | [diff] [blame] | 343 | struct list_head *zstd_alloc_workspace(unsigned int level) |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 344 | { |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 345 | struct workspace *workspace; |
| 346 | |
| 347 | workspace = kzalloc(sizeof(*workspace), GFP_KERNEL); |
| 348 | if (!workspace) |
| 349 | return ERR_PTR(-ENOMEM); |
| 350 | |
Dennis Zhou | d3c6ab7 | 2019-02-04 15:20:07 -0500 | [diff] [blame] | 351 | workspace->size = zstd_ws_mem_sizes[level - 1]; |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 352 | workspace->level = level; |
| 353 | workspace->req_level = level; |
| 354 | workspace->last_used = jiffies; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 355 | workspace->mem = kvmalloc(workspace->size, GFP_KERNEL); |
| 356 | workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL); |
| 357 | if (!workspace->mem || !workspace->buf) |
| 358 | goto fail; |
| 359 | |
| 360 | INIT_LIST_HEAD(&workspace->list); |
Dennis Zhou | 3f93aef | 2019-02-04 15:20:08 -0500 | [diff] [blame] | 361 | INIT_LIST_HEAD(&workspace->lru_list); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 362 | |
| 363 | return &workspace->list; |
| 364 | fail: |
| 365 | zstd_free_workspace(&workspace->list); |
| 366 | return ERR_PTR(-ENOMEM); |
| 367 | } |
| 368 | |
/*
 * zstd_compress_pages - compress a file range into freshly allocated pages
 * @ws:        workspace list node obtained from zstd_get_workspace()
 * @mapping:   address space to read the uncompressed data from
 * @start:     byte offset in @mapping of the range to compress
 * @pages:     array that receives the allocated output pages (caller frees)
 * @out_pages: in: capacity of @pages; out: number of pages actually used
 * @total_in:  out: bytes of input consumed
 * @total_out: in: length of the input range; out: bytes of output produced
 *
 * Returns 0 on success, -E2BIG if the data does not compress within the
 * output budget, -ENOMEM on page allocation failure, -EIO on zstd errors.
 */
int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
		u64 start, struct page **pages, unsigned long *out_pages,
		unsigned long *total_in, unsigned long *total_out)
{
	struct workspace *workspace = list_entry(ws, struct workspace, list);
	zstd_cstream *stream;
	int ret = 0;
	int nr_pages = 0;
	struct page *in_page = NULL;  /* The current page to read */
	struct page *out_page = NULL; /* The current page to write to */
	unsigned long tot_in = 0;
	unsigned long tot_out = 0;
	unsigned long len = *total_out;	/* *total_out carries input length in */
	const unsigned long nr_dest_pages = *out_pages;
	unsigned long max_out = nr_dest_pages * PAGE_SIZE;
	zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
							   len);

	*out_pages = 0;
	*total_out = 0;
	*total_in = 0;

	/* Initialize the stream */
	stream = zstd_init_cstream(&params, len, workspace->mem,
			workspace->size);
	if (!stream) {
		pr_warn("BTRFS: zstd_init_cstream failed\n");
		ret = -EIO;
		goto out;
	}

	/* map in the first page of input data */
	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
	workspace->in_buf.src = kmap(in_page);
	workspace->in_buf.pos = 0;
	workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);


	/* Allocate and map in the output buffer */
	out_page = alloc_page(GFP_NOFS);
	if (out_page == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	pages[nr_pages++] = out_page;
	workspace->out_buf.dst = kmap(out_page);
	workspace->out_buf.pos = 0;
	workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);

	while (1) {
		size_t ret2;

		ret2 = zstd_compress_stream(stream, &workspace->out_buf,
				&workspace->in_buf);
		if (zstd_is_error(ret2)) {
			pr_debug("BTRFS: zstd_compress_stream returned %d\n",
					zstd_get_error_code(ret2));
			ret = -EIO;
			goto out;
		}

		/*
		 * Check to see if we are making it bigger; give up once we
		 * are past the first 8KiB of input with no net gain.
		 */
		if (tot_in + workspace->in_buf.pos > 8192 &&
				tot_in + workspace->in_buf.pos <
				tot_out + workspace->out_buf.pos) {
			ret = -E2BIG;
			goto out;
		}

		/* We've reached the end of our output range */
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		/* Check if we need more output space */
		if (workspace->out_buf.pos == workspace->out_buf.size) {
			tot_out += PAGE_SIZE;
			max_out -= PAGE_SIZE;
			kunmap(out_page);
			if (nr_pages == nr_dest_pages) {
				out_page = NULL;
				ret = -E2BIG;
				goto out;
			}
			out_page = alloc_page(GFP_NOFS);
			if (out_page == NULL) {
				ret = -ENOMEM;
				goto out;
			}
			pages[nr_pages++] = out_page;
			workspace->out_buf.dst = kmap(out_page);
			workspace->out_buf.pos = 0;
			workspace->out_buf.size = min_t(size_t, max_out,
							PAGE_SIZE);
		}

		/* We've reached the end of the input */
		if (workspace->in_buf.pos >= len) {
			tot_in += workspace->in_buf.pos;
			break;
		}

		/* Check if we need more input */
		if (workspace->in_buf.pos == workspace->in_buf.size) {
			tot_in += PAGE_SIZE;
			kunmap(in_page);
			put_page(in_page);

			start += PAGE_SIZE;
			len -= PAGE_SIZE;
			in_page = find_get_page(mapping, start >> PAGE_SHIFT);
			workspace->in_buf.src = kmap(in_page);
			workspace->in_buf.pos = 0;
			workspace->in_buf.size = min_t(size_t, len, PAGE_SIZE);
		}
	}
	/* Flush the remaining data and the frame epilogue. */
	while (1) {
		size_t ret2;

		ret2 = zstd_end_stream(stream, &workspace->out_buf);
		if (zstd_is_error(ret2)) {
			pr_debug("BTRFS: zstd_end_stream returned %d\n",
					zstd_get_error_code(ret2));
			ret = -EIO;
			goto out;
		}
		/* ret2 == 0 means the frame is fully written. */
		if (ret2 == 0) {
			tot_out += workspace->out_buf.pos;
			break;
		}
		if (workspace->out_buf.pos >= max_out) {
			tot_out += workspace->out_buf.pos;
			ret = -E2BIG;
			goto out;
		}

		tot_out += PAGE_SIZE;
		max_out -= PAGE_SIZE;
		kunmap(out_page);
		if (nr_pages == nr_dest_pages) {
			out_page = NULL;
			ret = -E2BIG;
			goto out;
		}
		out_page = alloc_page(GFP_NOFS);
		if (out_page == NULL) {
			ret = -ENOMEM;
			goto out;
		}
		pages[nr_pages++] = out_page;
		workspace->out_buf.dst = kmap(out_page);
		workspace->out_buf.pos = 0;
		workspace->out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
	}

	/* Compression must actually shrink the data to be worthwhile. */
	if (tot_out >= tot_in) {
		ret = -E2BIG;
		goto out;
	}

	ret = 0;
	*total_in = tot_in;
	*total_out = tot_out;
out:
	*out_pages = nr_pages;
	/* Cleanup */
	if (in_page) {
		kunmap(in_page);
		put_page(in_page);
	}
	if (out_page)
		kunmap(out_page);
	return ret;
}
| 545 | |
David Sterba | c4bf665 | 2019-10-01 22:38:34 +0200 | [diff] [blame] | 546 | int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb) |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 547 | { |
| 548 | struct workspace *workspace = list_entry(ws, struct workspace, list); |
| 549 | struct page **pages_in = cb->compressed_pages; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 550 | size_t srclen = cb->compressed_len; |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 551 | zstd_dstream *stream; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 552 | int ret = 0; |
| 553 | unsigned long page_in_index = 0; |
| 554 | unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE); |
| 555 | unsigned long buf_start; |
| 556 | unsigned long total_out = 0; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 557 | |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 558 | stream = zstd_init_dstream( |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 559 | ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); |
| 560 | if (!stream) { |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 561 | pr_debug("BTRFS: zstd_init_dstream failed\n"); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 562 | ret = -EIO; |
| 563 | goto done; |
| 564 | } |
| 565 | |
David Sterba | 56ee254 | 2021-10-27 10:42:27 +0200 | [diff] [blame] | 566 | workspace->in_buf.src = kmap(pages_in[page_in_index]); |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 567 | workspace->in_buf.pos = 0; |
| 568 | workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 569 | |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 570 | workspace->out_buf.dst = workspace->buf; |
| 571 | workspace->out_buf.pos = 0; |
| 572 | workspace->out_buf.size = PAGE_SIZE; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 573 | |
| 574 | while (1) { |
| 575 | size_t ret2; |
| 576 | |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 577 | ret2 = zstd_decompress_stream(stream, &workspace->out_buf, |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 578 | &workspace->in_buf); |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 579 | if (zstd_is_error(ret2)) { |
| 580 | pr_debug("BTRFS: zstd_decompress_stream returned %d\n", |
| 581 | zstd_get_error_code(ret2)); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 582 | ret = -EIO; |
| 583 | goto done; |
| 584 | } |
| 585 | buf_start = total_out; |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 586 | total_out += workspace->out_buf.pos; |
| 587 | workspace->out_buf.pos = 0; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 588 | |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 589 | ret = btrfs_decompress_buf2page(workspace->out_buf.dst, |
Qu Wenruo | 1c3dc17 | 2021-07-05 10:00:58 +0800 | [diff] [blame] | 590 | total_out - buf_start, cb, buf_start); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 591 | if (ret == 0) |
| 592 | break; |
| 593 | |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 594 | if (workspace->in_buf.pos >= srclen) |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 595 | break; |
| 596 | |
| 597 | /* Check if we've hit the end of a frame */ |
| 598 | if (ret2 == 0) |
| 599 | break; |
| 600 | |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 601 | if (workspace->in_buf.pos == workspace->in_buf.size) { |
David Sterba | 56ee254 | 2021-10-27 10:42:27 +0200 | [diff] [blame] | 602 | kunmap(pages_in[page_in_index++]); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 603 | if (page_in_index >= total_pages_in) { |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 604 | workspace->in_buf.src = NULL; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 605 | ret = -EIO; |
| 606 | goto done; |
| 607 | } |
| 608 | srclen -= PAGE_SIZE; |
David Sterba | 56ee254 | 2021-10-27 10:42:27 +0200 | [diff] [blame] | 609 | workspace->in_buf.src = kmap(pages_in[page_in_index]); |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 610 | workspace->in_buf.pos = 0; |
| 611 | workspace->in_buf.size = min_t(size_t, srclen, PAGE_SIZE); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 612 | } |
| 613 | } |
| 614 | ret = 0; |
Qu Wenruo | 1c3dc17 | 2021-07-05 10:00:58 +0800 | [diff] [blame] | 615 | zero_fill_bio(cb->orig_bio); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 616 | done: |
David Sterba | 56ee254 | 2021-10-27 10:42:27 +0200 | [diff] [blame] | 617 | if (workspace->in_buf.src) |
| 618 | kunmap(pages_in[page_in_index]); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 619 | return ret; |
| 620 | } |
| 621 | |
David Sterba | c4bf665 | 2019-10-01 22:38:34 +0200 | [diff] [blame] | 622 | int zstd_decompress(struct list_head *ws, unsigned char *data_in, |
| 623 | struct page *dest_page, unsigned long start_byte, size_t srclen, |
| 624 | size_t destlen) |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 625 | { |
| 626 | struct workspace *workspace = list_entry(ws, struct workspace, list); |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 627 | zstd_dstream *stream; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 628 | int ret = 0; |
| 629 | size_t ret2; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 630 | unsigned long total_out = 0; |
| 631 | unsigned long pg_offset = 0; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 632 | |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 633 | stream = zstd_init_dstream( |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 634 | ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); |
| 635 | if (!stream) { |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 636 | pr_warn("BTRFS: zstd_init_dstream failed\n"); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 637 | ret = -EIO; |
| 638 | goto finish; |
| 639 | } |
| 640 | |
| 641 | destlen = min_t(size_t, destlen, PAGE_SIZE); |
| 642 | |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 643 | workspace->in_buf.src = data_in; |
| 644 | workspace->in_buf.pos = 0; |
| 645 | workspace->in_buf.size = srclen; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 646 | |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 647 | workspace->out_buf.dst = workspace->buf; |
| 648 | workspace->out_buf.pos = 0; |
| 649 | workspace->out_buf.size = PAGE_SIZE; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 650 | |
| 651 | ret2 = 1; |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 652 | while (pg_offset < destlen |
| 653 | && workspace->in_buf.pos < workspace->in_buf.size) { |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 654 | unsigned long buf_start; |
| 655 | unsigned long buf_offset; |
| 656 | unsigned long bytes; |
| 657 | |
| 658 | /* Check if the frame is over and we still need more input */ |
| 659 | if (ret2 == 0) { |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 660 | pr_debug("BTRFS: zstd_decompress_stream ended early\n"); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 661 | ret = -EIO; |
| 662 | goto finish; |
| 663 | } |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 664 | ret2 = zstd_decompress_stream(stream, &workspace->out_buf, |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 665 | &workspace->in_buf); |
Nick Terrell | cf30f6a | 2020-09-11 16:49:00 -0700 | [diff] [blame] | 666 | if (zstd_is_error(ret2)) { |
| 667 | pr_debug("BTRFS: zstd_decompress_stream returned %d\n", |
| 668 | zstd_get_error_code(ret2)); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 669 | ret = -EIO; |
| 670 | goto finish; |
| 671 | } |
| 672 | |
| 673 | buf_start = total_out; |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 674 | total_out += workspace->out_buf.pos; |
| 675 | workspace->out_buf.pos = 0; |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 676 | |
| 677 | if (total_out <= start_byte) |
| 678 | continue; |
| 679 | |
| 680 | if (total_out > start_byte && buf_start < start_byte) |
| 681 | buf_offset = start_byte - buf_start; |
| 682 | else |
| 683 | buf_offset = 0; |
| 684 | |
| 685 | bytes = min_t(unsigned long, destlen - pg_offset, |
David Sterba | 431e982 | 2017-11-15 18:27:39 +0100 | [diff] [blame] | 686 | workspace->out_buf.size - buf_offset); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 687 | |
Ira Weiny | 3590ec5 | 2021-02-09 22:22:19 -0800 | [diff] [blame] | 688 | memcpy_to_page(dest_page, pg_offset, |
| 689 | workspace->out_buf.dst + buf_offset, bytes); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 690 | |
| 691 | pg_offset += bytes; |
| 692 | } |
| 693 | ret = 0; |
| 694 | finish: |
| 695 | if (pg_offset < destlen) { |
Ira Weiny | d048b9c | 2021-05-04 18:40:07 -0700 | [diff] [blame] | 696 | memzero_page(dest_page, pg_offset, destlen - pg_offset); |
Nick Terrell | 5c1aab1 | 2017-08-09 19:39:02 -0700 | [diff] [blame] | 697 | } |
| 698 | return ret; |
| 699 | } |
| 700 | |
/*
 * Compression operations table for zstd, with the supported level range
 * bounded by ZSTD_BTRFS_MAX_LEVEL / ZSTD_BTRFS_DEFAULT_LEVEL.
 */
const struct btrfs_compress_op btrfs_zstd_compress = {
	/* ZSTD uses own workspace manager */
	.workspace_manager = NULL,
	.max_level	= ZSTD_BTRFS_MAX_LEVEL,
	.default_level	= ZSTD_BTRFS_DEFAULT_LEVEL,
};