// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs compress support
 *
 * Copyright (c) 2019 Chao Yu <chao@kernel.org>
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include <linux/pagevec.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *cic_entry_slab;
static struct kmem_cache *dic_entry_slab;

static void *page_array_alloc(struct inode *inode, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (likely(size <= sbi->page_array_slab_size))
		return kmem_cache_zalloc(sbi->page_array_slab, GFP_NOFS);
	return f2fs_kzalloc(sbi, size, GFP_NOFS);
}

static void page_array_free(struct inode *inode, void *pages, int nr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int size = sizeof(struct page *) * nr;

	if (!pages)
		return;

	if (likely(size <= sbi->page_array_slab_size))
		kmem_cache_free(sbi->page_array_slab, pages);
	else
		kfree(pages);
}

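/*
 * Per-algorithm backend vtable. Each compile-time-enabled compressor
 * provides one instance; the (de)compress hooks operate on the
 * virtually-mapped cluster buffers set up by the callers below. The
 * init/destroy context hooks are optional and NULL-checked before use
 * (in this file only zstd supplies decompress-context hooks).
 */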
struct f2fs_compress_ops {
	int (*init_compress_ctx)(struct compress_ctx *cc);
	void (*destroy_compress_ctx)(struct compress_ctx *cc);
	int (*compress_pages)(struct compress_ctx *cc);
	int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
	void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
	int (*decompress_pages)(struct decompress_io_ctx *dic);
};

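/*
 * Cluster math on page indexes. For example, with log_cluster_size == 2
 * (a 4-page cluster), page index 5 has offset 5 & 3 == 1 within cluster
 * 5 >> 2 == 1, and cluster 1 starts at page index 1 << 2 == 4.
 */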
static unsigned int offset_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	return index & (cc->cluster_size - 1);
}

static pgoff_t cluster_idx(struct compress_ctx *cc, pgoff_t index)
{
	return index >> cc->log_cluster_size;
}

static pgoff_t start_idx_of_cluster(struct compress_ctx *cc)
{
	return cc->cluster_idx << cc->log_cluster_size;
}

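/*
 * A compressed page is recognized by its page_private() pointing at a
 * cic/dic whose first field is F2FS_COMPRESSED_PAGE_MAGIC; pages whose
 * private word carries plain flag bits (page_private_nonpointer) are
 * filtered out first.
 */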
bool f2fs_is_compressed_page(struct page *page)
{
	if (!PagePrivate(page))
		return false;
	if (!page_private(page))
		return false;
	if (page_private_nonpointer(page))
		return false;

	f2fs_bug_on(F2FS_M_SB(page->mapping),
		*((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC);
	return true;
}

static void f2fs_set_compressed_page(struct page *page,
		struct inode *inode, pgoff_t index, void *data)
{
	attach_page_private(page, (void *)data);

	/* i_crypto_info and iv index */
	page->index = index;
	page->mapping = inode->i_mapping;
}

static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
	int i;

	for (i = 0; i < len; i++) {
		if (!cc->rpages[i])
			continue;
		if (unlock)
			unlock_page(cc->rpages[i]);
		else
			put_page(cc->rpages[i]);
	}
}

static void f2fs_put_rpages(struct compress_ctx *cc)
{
	f2fs_drop_rpages(cc, cc->cluster_size, false);
}

static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
{
	f2fs_drop_rpages(cc, len, true);
}

static void f2fs_put_rpages_wbc(struct compress_ctx *cc,
		struct writeback_control *wbc, bool redirty, int unlock)
{
	unsigned int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (!cc->rpages[i])
			continue;
		if (redirty)
			redirty_page_for_writepage(wbc, cc->rpages[i]);
		f2fs_put_page(cc->rpages[i], unlock);
	}
}

struct page *f2fs_compress_control_page(struct page *page)
{
	return ((struct compress_io_ctx *)page_private(page))->rpages[0];
}

140int f2fs_init_compress_ctx(struct compress_ctx *cc)
141{
Jaegeuk Kimadfc6942020-09-23 00:54:50 -0700142 if (cc->rpages)
Chao Yu4c8ff702019-11-01 18:07:14 +0800143 return 0;
144
Chao Yu31083032020-09-14 17:05:13 +0800145 cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
Chao Yu4c8ff702019-11-01 18:07:14 +0800146 return cc->rpages ? 0 : -ENOMEM;
147}
148
Chao Yu4c4dcb82021-05-10 17:30:32 +0800149void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
Chao Yu4c8ff702019-11-01 18:07:14 +0800150{
Chao Yu31083032020-09-14 17:05:13 +0800151 page_array_free(cc->inode, cc->rpages, cc->cluster_size);
Chao Yu4c8ff702019-11-01 18:07:14 +0800152 cc->rpages = NULL;
153 cc->nr_rpages = 0;
154 cc->nr_cpages = 0;
Chao Yu4c4dcb82021-05-10 17:30:32 +0800155 if (!reuse)
156 cc->cluster_idx = NULL_CLUSTER;
Chao Yu4c8ff702019-11-01 18:07:14 +0800157}
158
159void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page)
160{
161 unsigned int cluster_ofs;
162
163 if (!f2fs_cluster_can_merge_page(cc, page->index))
164 f2fs_bug_on(F2FS_I_SB(cc->inode), 1);
165
166 cluster_ofs = offset_in_cluster(cc, page->index);
167 cc->rpages[cluster_ofs] = page;
168 cc->nr_rpages++;
169 cc->cluster_idx = cluster_idx(cc, page->index);
170}
171
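/*
 * Algorithm backends. Each compress_pages hook consumes cc->rlen bytes
 * from cc->rbuf into cc->cbuf->cdata and updates cc->clen to the
 * compressed size on success; a result that does not fit the budget is
 * reported as -EAGAIN (lz4/zstd) or caught later when the caller checks
 * cc->clen against the cluster's maximum payload.
 */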
#ifdef CONFIG_F2FS_FS_LZO
static int lzo_init_compress_ctx(struct compress_ctx *cc)
{
	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
				LZO1X_MEM_COMPRESS, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	cc->clen = lzo1x_worst_compress(PAGE_SIZE << cc->log_cluster_size);
	return 0;
}

static void lzo_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

static int lzo_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzo1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static int lzo_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
						dic->rbuf, &dic->rlen);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lzo invalid rlen:%zu, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id,
					dic->rlen,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzo_ops = {
	.init_compress_ctx = lzo_init_compress_ctx,
	.destroy_compress_ctx = lzo_destroy_compress_ctx,
	.compress_pages = lzo_compress_pages,
	.decompress_pages = lzo_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZ4
static int lz4_init_compress_ctx(struct compress_ctx *cc)
{
	unsigned int size = LZ4_MEM_COMPRESS;

#ifdef CONFIG_F2FS_FS_LZ4HC
	if (F2FS_I(cc->inode)->i_compress_flag >> COMPRESS_LEVEL_OFFSET)
		size = LZ4HC_MEM_COMPRESS;
#endif

	cc->private = f2fs_kvmalloc(F2FS_I_SB(cc->inode), size, GFP_NOFS);
	if (!cc->private)
		return -ENOMEM;

	/*
	 * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
	 * the worst compress case, because the lz4 compressor handles a
	 * limited output budget properly.
	 */
	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void lz4_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
}

#ifdef CONFIG_F2FS_FS_LZ4HC
static int lz4hc_compress_pages(struct compress_ctx *cc)
{
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;
	int len;

	if (level)
		len = LZ4_compress_HC(cc->rbuf, cc->cbuf->cdata, cc->rlen,
					cc->clen, level, cc->private);
	else
		len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}
#endif

static int lz4_compress_pages(struct compress_ctx *cc)
{
	int len;

#ifdef CONFIG_F2FS_FS_LZ4HC
	return lz4hc_compress_pages(cc);
#endif
	len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
						cc->clen, cc->private);
	if (!len)
		return -EAGAIN;

	cc->clen = len;
	return 0;
}

static int lz4_decompress_pages(struct decompress_io_ctx *dic)
{
	int ret;

	ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
						dic->clen, dic->rlen);
	if (ret < 0) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 decompress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id, ret);
		return -EIO;
	}

	if (ret != PAGE_SIZE << dic->log_cluster_size) {
		printk_ratelimited("%sF2FS-fs (%s): lz4 invalid ret:%d, "
					"expected:%lu\n", KERN_ERR,
					F2FS_I_SB(dic->inode)->sb->s_id, ret,
					PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lz4_ops = {
	.init_compress_ctx = lz4_init_compress_ctx,
	.destroy_compress_ctx = lz4_destroy_compress_ctx,
	.compress_pages = lz4_compress_pages,
	.decompress_pages = lz4_decompress_pages,
};
#endif

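/*
 * zstd uses explicit streaming contexts backed by caller-provided
 * workspace memory. Both workspaces are sized up front (from the
 * compression parameters on the write side, from the maximum window
 * size on the read side) so that the zstd library never allocates
 * internally.
 */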
#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL	1

static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
	ZSTD_parameters params;
	ZSTD_CStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
						COMPRESS_LEVEL_OFFSET;

	if (!level)
		level = F2FS_ZSTD_DEFAULT_CLEVEL;

	params = ZSTD_getParams(level, cc->rlen, 0);
	workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);

	workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	cc->private = workspace;
	cc->private2 = stream;

	cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	return 0;
}

static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
	kvfree(cc->private);
	cc->private = NULL;
	cc->private2 = NULL;
}

static int zstd_compress_pages(struct compress_ctx *cc)
{
	ZSTD_CStream *stream = cc->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int src_size = cc->rlen;
	int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
	int ret;

	inbuf.pos = 0;
	inbuf.src = cc->rbuf;
	inbuf.size = src_size;

	outbuf.pos = 0;
	outbuf.dst = cc->cbuf->cdata;
	outbuf.size = dst_size;

	ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	ret = ZSTD_endStream(stream, &outbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	/*
	 * A nonzero return means compressed data remains in the
	 * intermediate buffer because cbuf.cdata ran out of space.
	 */
	if (ret)
		return -EAGAIN;

	cc->clen = outbuf.pos;
	return 0;
}

static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream;
	void *workspace;
	unsigned int workspace_size;
	unsigned int max_window_size =
			MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);

	workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);

	workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
					workspace_size, GFP_NOFS);
	if (!workspace)
		return -ENOMEM;

	stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
	if (!stream) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__);
		kvfree(workspace);
		return -EIO;
	}

	dic->private = workspace;
	dic->private2 = stream;

	return 0;
}

static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
	kvfree(dic->private);
	dic->private = NULL;
	dic->private2 = NULL;
}

static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
	ZSTD_DStream *stream = dic->private2;
	ZSTD_inBuffer inbuf;
	ZSTD_outBuffer outbuf;
	int ret;

	inbuf.pos = 0;
	inbuf.src = dic->cbuf->cdata;
	inbuf.size = dic->clen;

	outbuf.pos = 0;
	outbuf.dst = dic->rbuf;
	outbuf.size = dic->rlen;

	ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
	if (ZSTD_isError(ret)) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_decompressStream failed, ret: %d\n",
				KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, ZSTD_getErrorCode(ret));
		return -EIO;
	}

	if (dic->rlen != outbuf.pos) {
		printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
				"expected:%lu\n", KERN_ERR,
				F2FS_I_SB(dic->inode)->sb->s_id,
				__func__, dic->rlen,
				PAGE_SIZE << dic->log_cluster_size);
		return -EIO;
	}

	return 0;
}

static const struct f2fs_compress_ops f2fs_zstd_ops = {
	.init_compress_ctx = zstd_init_compress_ctx,
	.destroy_compress_ctx = zstd_destroy_compress_ctx,
	.compress_pages = zstd_compress_pages,
	.init_decompress_ctx = zstd_init_decompress_ctx,
	.destroy_decompress_ctx = zstd_destroy_decompress_ctx,
	.decompress_pages = zstd_decompress_pages,
};
#endif

#ifdef CONFIG_F2FS_FS_LZO
#ifdef CONFIG_F2FS_FS_LZORLE
static int lzorle_compress_pages(struct compress_ctx *cc)
{
	int ret;

	ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
					&cc->clen, cc->private);
	if (ret != LZO_E_OK) {
		printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
				KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
		return -EIO;
	}
	return 0;
}

static const struct f2fs_compress_ops f2fs_lzorle_ops = {
	.init_compress_ctx = lzo_init_compress_ctx,
	.destroy_compress_ctx = lzo_destroy_compress_ctx,
	.compress_pages = lzorle_compress_pages,
	.decompress_pages = lzo_decompress_pages,
};
#endif
#endif

static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
	&f2fs_lzo_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_LZ4
	&f2fs_lz4_ops,
#else
	NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
	&f2fs_zstd_ops,
#else
	NULL,
#endif
#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
	&f2fs_lzorle_ops,
#else
	NULL,
#endif
};

bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}

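/*
 * Intermediate pages holding compressed data come from a mempool so
 * that writeback can always make forward progress: under memory
 * pressure the preallocated reserve of num_compress_pages pages lets
 * mempool_alloc() with GFP_NOFS eventually succeed, which is why
 * f2fs_compress_alloc_page() does not need a NULL check after it.
 */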
static mempool_t *compress_page_pool;
static int num_compress_pages = 512;
module_param(num_compress_pages, uint, 0444);
MODULE_PARM_DESC(num_compress_pages,
		"Number of intermediate compress pages to preallocate");

int f2fs_init_compress_mempool(void)
{
	compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
	if (!compress_page_pool)
		return -ENOMEM;

	return 0;
}

void f2fs_destroy_compress_mempool(void)
{
	mempool_destroy(compress_page_pool);
}

static struct page *f2fs_compress_alloc_page(void)
{
	struct page *page;

	page = mempool_alloc(compress_page_pool, GFP_NOFS);
	lock_page(page);

	return page;
}

static void f2fs_compress_free_page(struct page *page)
{
	if (!page)
		return;
	detach_page_private(page);
	page->mapping = NULL;
	unlock_page(page);
	mempool_free(page, compress_page_pool);
}

#define MAX_VMAP_RETRIES	3

static void *f2fs_vmap(struct page **pages, unsigned int count)
{
	int i;
	void *buf = NULL;

	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
		buf = vm_map_ram(pages, count, -1);
		if (buf)
			break;
		vm_unmap_aliases();
	}
	return buf;
}

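/*
 * Compress one cluster: map the raw pages and freshly allocated
 * destination pages into contiguous virtual buffers, run the backend,
 * then trim the cpages array down to the pages actually occupied by
 * the compressed payload plus header, freeing the unneeded tail.
 */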
static int f2fs_compress_pages(struct compress_ctx *cc)
{
	struct f2fs_inode_info *fi = F2FS_I(cc->inode);
	const struct f2fs_compress_ops *cops =
				f2fs_cops[fi->i_compress_algorithm];
	unsigned int max_len, new_nr_cpages;
	struct page **new_cpages;
	u32 chksum = 0;
	int i, ret;

	trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
				cc->cluster_size, fi->i_compress_algorithm);

	if (cops->init_compress_ctx) {
		ret = cops->init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	max_len = COMPRESS_HEADER_SIZE + cc->clen;
	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);

	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
	if (!cc->cpages) {
		ret = -ENOMEM;
		goto destroy_compress_ctx;
	}

	for (i = 0; i < cc->nr_cpages; i++) {
		cc->cpages[i] = f2fs_compress_alloc_page();
		if (!cc->cpages[i]) {
			ret = -ENOMEM;
			goto out_free_cpages;
		}
	}

	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
	if (!cc->rbuf) {
		ret = -ENOMEM;
		goto out_free_cpages;
	}

	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
	if (!cc->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	ret = cops->compress_pages(cc);
	if (ret)
		goto out_vunmap_cbuf;

	max_len = PAGE_SIZE * (cc->cluster_size - 1) - COMPRESS_HEADER_SIZE;

	if (cc->clen > max_len) {
		ret = -EAGAIN;
		goto out_vunmap_cbuf;
	}

	cc->cbuf->clen = cpu_to_le32(cc->clen);

	if (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)
		chksum = f2fs_crc32(F2FS_I_SB(cc->inode),
					cc->cbuf->cdata, cc->clen);
	cc->cbuf->chksum = cpu_to_le32(chksum);

	for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
		cc->cbuf->reserved[i] = cpu_to_le32(0);

	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

	/* Now we're going to cut unnecessary tail pages */
	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
	if (!new_cpages) {
		ret = -ENOMEM;
		goto out_vunmap_cbuf;
	}

	/* zero out any unused part of the last page */
	memset(&cc->cbuf->cdata[cc->clen], 0,
			(new_nr_cpages * PAGE_SIZE) -
			(cc->clen + COMPRESS_HEADER_SIZE));

	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
	vm_unmap_ram(cc->rbuf, cc->cluster_size);

	for (i = 0; i < cc->nr_cpages; i++) {
		if (i < new_nr_cpages) {
			new_cpages[i] = cc->cpages[i];
			continue;
		}
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}

	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);

	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = new_cpages;
	cc->nr_cpages = new_nr_cpages;

	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return 0;

out_vunmap_cbuf:
	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(cc->rbuf, cc->cluster_size);
out_free_cpages:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (cc->cpages[i])
			f2fs_compress_free_page(cc->cpages[i]);
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
destroy_compress_ctx:
	if (cops->destroy_compress_ctx)
		cops->destroy_compress_ctx(cc);
out:
	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
							cc->clen, ret);
	return ret;
}

void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
	struct f2fs_inode_info *fi = F2FS_I(dic->inode);
	const struct f2fs_compress_ops *cops =
			f2fs_cops[fi->i_compress_algorithm];
	int ret;
	int i;

	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
				dic->cluster_size, fi->i_compress_algorithm);

	if (dic->failed) {
		ret = -EIO;
		goto out_end_io;
	}

	dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
	if (!dic->tpages) {
		ret = -ENOMEM;
		goto out_end_io;
	}

	for (i = 0; i < dic->cluster_size; i++) {
		if (dic->rpages[i]) {
			dic->tpages[i] = dic->rpages[i];
			continue;
		}

		dic->tpages[i] = f2fs_compress_alloc_page();
		if (!dic->tpages[i]) {
			ret = -ENOMEM;
			goto out_end_io;
		}
	}

	if (cops->init_decompress_ctx) {
		ret = cops->init_decompress_ctx(dic);
		if (ret)
			goto out_end_io;
	}

	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
	if (!dic->rbuf) {
		ret = -ENOMEM;
		goto out_destroy_decompress_ctx;
	}

	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
	if (!dic->cbuf) {
		ret = -ENOMEM;
		goto out_vunmap_rbuf;
	}

	dic->clen = le32_to_cpu(dic->cbuf->clen);
	dic->rlen = PAGE_SIZE << dic->log_cluster_size;

	if (dic->clen > PAGE_SIZE * dic->nr_cpages - COMPRESS_HEADER_SIZE) {
		ret = -EFSCORRUPTED;
		goto out_vunmap_cbuf;
	}

	ret = cops->decompress_pages(dic);

	if (!ret && (fi->i_compress_flag & 1 << COMPRESS_CHKSUM)) {
		u32 provided = le32_to_cpu(dic->cbuf->chksum);
		u32 calculated = f2fs_crc32(sbi, dic->cbuf->cdata, dic->clen);

		if (provided != calculated) {
			if (!is_inode_flag_set(dic->inode, FI_COMPRESS_CORRUPT)) {
				set_inode_flag(dic->inode, FI_COMPRESS_CORRUPT);
				printk_ratelimited(
					"%sF2FS-fs (%s): checksum invalid, nid = %lu, %x vs %x",
					KERN_INFO, sbi->sb->s_id, dic->inode->i_ino,
					provided, calculated);
			}
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
	}

out_vunmap_cbuf:
	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf:
	vm_unmap_ram(dic->rbuf, dic->cluster_size);
out_destroy_decompress_ctx:
	if (cops->destroy_decompress_ctx)
		cops->destroy_decompress_ctx(dic);
out_end_io:
	trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
							dic->clen, ret);
	f2fs_decompress_end_io(dic, ret);
}

/*
 * This is called when a page of a compressed cluster has been read from disk
 * (or failed to be read from disk).  It checks whether this page was the last
 * page being waited on in the cluster, and if so, it decompresses the cluster
 * (or in the case of a failure, cleans up without actually decompressing).
 */
void f2fs_end_read_compressed_page(struct page *page, bool failed,
						block_t blkaddr)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);
	struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);

	dec_page_count(sbi, F2FS_RD_DATA);

	if (failed)
		WRITE_ONCE(dic->failed, true);
	else if (blkaddr)
		f2fs_cache_compressed_page(sbi, page,
					dic->inode->i_ino, blkaddr);

	if (atomic_dec_and_test(&dic->remaining_pages))
		f2fs_decompress_cluster(dic);
}

static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
{
	if (cc->cluster_idx == NULL_CLUSTER)
		return true;
	return cc->cluster_idx == cluster_idx(cc, index);
}

bool f2fs_cluster_is_empty(struct compress_ctx *cc)
{
	return cc->nr_rpages == 0;
}

static bool f2fs_cluster_is_full(struct compress_ctx *cc)
{
	return cc->cluster_size == cc->nr_rpages;
}

bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
{
	if (f2fs_cluster_is_empty(cc))
		return true;
	return is_page_in_cluster(cc, index);
}

static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
	loff_t i_size = i_size_read(cc->inode);
	unsigned nr_pages = DIV_ROUND_UP(i_size, PAGE_SIZE);
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		struct page *page = cc->rpages[i];

		f2fs_bug_on(F2FS_I_SB(cc->inode), !page);

		/* beyond EOF */
		if (page->index >= nr_pages)
			return true;
	}
	return false;
}

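/*
 * Walk a cluster's block addresses. Returns 0 if the cluster is not
 * compressed (or its dnode is missing), otherwise 1 for the
 * COMPRESS_ADDR header slot plus one per counted block: valid on-disk
 * blocks when @compr is true, any non-NULL_ADDR block otherwise.
 */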
static int __f2fs_cluster_blocks(struct inode *inode,
				unsigned int cluster_idx, bool compr)
{
	struct dnode_of_data dn;
	unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
	unsigned int start_idx = cluster_idx <<
				F2FS_I(inode)->i_log_cluster_size;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (ret) {
		if (ret == -ENOENT)
			ret = 0;
		goto fail;
	}

	if (dn.data_blkaddr == COMPRESS_ADDR) {
		int i;

		ret = 1;
		for (i = 1; i < cluster_size; i++) {
			block_t blkaddr;

			blkaddr = data_blkaddr(dn.inode,
					dn.node_page, dn.ofs_in_node + i);
			if (compr) {
				if (__is_valid_data_blkaddr(blkaddr))
					ret++;
			} else {
				if (blkaddr != NULL_ADDR)
					ret++;
			}
		}

		f2fs_bug_on(F2FS_I_SB(inode),
			!compr && ret != cluster_size &&
			!is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
	}
fail:
	f2fs_put_dnode(&dn);
	return ret;
}

/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
	return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
}

/* return # of valid blocks in compressed cluster */
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
	return __f2fs_cluster_blocks(inode,
		index >> F2FS_I(inode)->i_log_cluster_size,
		false);
}

static bool cluster_may_compress(struct compress_ctx *cc)
{
	if (!f2fs_need_compress_data(cc->inode))
		return false;
	if (f2fs_is_atomic_file(cc->inode))
		return false;
	if (!f2fs_cluster_is_full(cc))
		return false;
	if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
		return false;
	return !cluster_has_invalid_data(cc);
}

static void set_cluster_writeback(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++) {
		if (cc->rpages[i])
			set_page_writeback(cc->rpages[i]);
	}
}

static void set_cluster_dirty(struct compress_ctx *cc)
{
	int i;

	for (i = 0; i < cc->cluster_size; i++)
		if (cc->rpages[i])
			set_page_dirty(cc->rpages[i]);
}

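/*
 * Pin and lock every pagecache page of a compressed cluster before it
 * is partially overwritten. Pages that are not yet uptodate are read
 * (and decompressed) first, then all pages are looked up again under
 * lock; a truncation racing in between simply restarts the sequence.
 */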
static int prepare_compress_overwrite(struct compress_ctx *cc,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
	struct address_space *mapping = cc->inode->i_mapping;
	struct page *page;
	sector_t last_block_in_bio;
	unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i, ret;

retry:
	ret = f2fs_is_compressed_cluster(cc->inode, start_idx);
	if (ret <= 0)
		return ret;

	ret = f2fs_init_compress_ctx(cc);
	if (ret)
		return ret;

	/* keep page reference to avoid page reclaim */
	for (i = 0; i < cc->cluster_size; i++) {
		page = f2fs_pagecache_get_page(mapping, start_idx + i,
							fgp_flag, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			goto unlock_pages;
		}

		if (PageUptodate(page))
			f2fs_put_page(page, 1);
		else
			f2fs_compress_ctx_add_page(cc, page);
	}

	if (!f2fs_cluster_is_empty(cc)) {
		struct bio *bio = NULL;

		ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
					&last_block_in_bio, false, true);
		f2fs_put_rpages(cc);
		f2fs_destroy_compress_ctx(cc, true);
		if (ret)
			goto out;
		if (bio)
			f2fs_submit_bio(sbi, bio, DATA);

		ret = f2fs_init_compress_ctx(cc);
		if (ret)
			goto out;
	}

	for (i = 0; i < cc->cluster_size; i++) {
		f2fs_bug_on(sbi, cc->rpages[i]);

		page = find_lock_page(mapping, start_idx + i);
		if (!page) {
			/* page can be truncated */
			goto release_and_retry;
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);
		f2fs_compress_ctx_add_page(cc, page);

		if (!PageUptodate(page)) {
release_and_retry:
			f2fs_put_rpages(cc);
			f2fs_unlock_rpages(cc, i + 1);
			f2fs_destroy_compress_ctx(cc, true);
			goto retry;
		}
	}

	if (likely(!ret)) {
		*fsdata = cc->rpages;
		*pagep = cc->rpages[offset_in_cluster(cc, index)];
		return cc->cluster_size;
	}

unlock_pages:
	f2fs_put_rpages(cc);
	f2fs_unlock_rpages(cc, i);
	f2fs_destroy_compress_ctx(cc, true);
out:
	return ret;
}

int f2fs_prepare_compress_overwrite(struct inode *inode,
		struct page **pagep, pgoff_t index, void **fsdata)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
		.rpages = NULL,
		.nr_rpages = 0,
	};

	return prepare_compress_overwrite(&cc, pagep, index, fsdata);
}

bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
		pgoff_t index, unsigned copied)
{
	struct compress_ctx cc = {
		.inode = inode,
		.log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
		.cluster_size = F2FS_I(inode)->i_cluster_size,
		.rpages = fsdata,
	};
	bool first_index = (index == cc.rpages[0]->index);

	if (copied)
		set_cluster_dirty(&cc);

	f2fs_put_rpages_wbc(&cc, NULL, false, 1);
	f2fs_destroy_compress_ctx(&cc, false);

	return first_index;
}

int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
{
	void *fsdata = NULL;
	struct page *pagep;
	int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
	pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
							log_cluster_size;
	int err;

	err = f2fs_is_compressed_cluster(inode, start_idx);
	if (err < 0)
		return err;

	/* truncate normal cluster */
	if (!err)
		return f2fs_do_truncate_blocks(inode, from, lock);

	/* truncate compressed cluster */
	err = f2fs_prepare_compress_overwrite(inode, &pagep,
						start_idx, &fsdata);

	/* should not be a normal cluster */
	f2fs_bug_on(F2FS_I_SB(inode), err == 0);

	if (err <= 0)
		return err;

	if (err > 0) {
		struct page **rpages = fsdata;
		int cluster_size = F2FS_I(inode)->i_cluster_size;
		int i;

		for (i = cluster_size - 1; i >= 0; i--) {
			loff_t start = rpages[i]->index << PAGE_SHIFT;

			if (from <= start) {
				zero_user_segment(rpages[i], 0, PAGE_SIZE);
			} else {
				zero_user_segment(rpages[i], from - start,
								PAGE_SIZE);
				break;
			}
		}

		f2fs_compress_write_end(inode, fsdata, start_idx, true);
	}
	return 0;
}

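/*
 * Write one compressed cluster: reserve the dnode, encrypt the cpages
 * if needed, mark slot 0 with COMPRESS_ADDR as the cluster header, then
 * outplace-write the compressed pages and release block addresses left
 * over past the compressed tail. Any failure returns -EAGAIN so the
 * caller can fall back to writing the cluster as raw pages.
 */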
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	struct inode *inode = cc->inode;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = cc->inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.old_blkaddr = NEW_ADDR,
		.page = NULL,
		.encrypted_page = NULL,
		.compressed_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
		.encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode),
	};
	struct dnode_of_data dn;
	struct node_info ni;
	struct compress_io_ctx *cic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	unsigned int last_index = cc->cluster_size - 1;
	loff_t psize;
	int i, err;

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		mapping_set_error(cc->rpages[0]->mapping, -EIO);
		goto out_free;
	}

	if (IS_NOQUOTA(inode)) {
		/*
		 * We need to wait for node_write to avoid block allocation during
		 * checkpoint. This can only happen to quota writes which can cause
		 * the below discard race condition.
		 */
		f2fs_down_read(&sbi->node_write);
	} else if (!f2fs_trylock_op(sbi)) {
		goto out_free;
	}

	set_new_dnode(&dn, cc->inode, NULL, NULL, 0);

	err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
	if (err)
		goto out_unlock_op;

	for (i = 0; i < cc->cluster_size; i++) {
		if (data_blkaddr(dn.inode, dn.node_page,
					dn.ofs_in_node + i) == NULL_ADDR)
			goto out_put_dnode;
	}

	psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto out_put_dnode;

	fio.version = ni.version;

	cic = kmem_cache_zalloc(cic_entry_slab, GFP_NOFS);
	if (!cic)
		goto out_put_dnode;

	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	cic->inode = inode;
	atomic_set(&cic->pending_pages, cc->nr_cpages);
	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!cic->rpages)
		goto out_put_cic;

	cic->nr_rpages = cc->cluster_size;

	for (i = 0; i < cc->nr_cpages; i++) {
		f2fs_set_compressed_page(cc->cpages[i], inode,
					cc->rpages[i + 1]->index, cic);
		fio.compressed_page = cc->cpages[i];

		fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page,
						dn.ofs_in_node + i + 1);

		/* wait for GCed page writeback via META_MAPPING */
		f2fs_wait_on_block_writeback(inode, fio.old_blkaddr);

		if (fio.encrypted) {
			fio.page = cc->rpages[i + 1];
			err = f2fs_encrypt_one_page(&fio);
			if (err)
				goto out_destroy_crypt;
			cc->cpages[i] = fio.encrypted_page;
		}
	}

	set_cluster_writeback(cc);

	for (i = 0; i < cc->cluster_size; i++)
		cic->rpages[i] = cc->rpages[i];

	for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
		block_t blkaddr;

		blkaddr = f2fs_data_blkaddr(&dn);
		fio.page = cc->rpages[i];
		fio.old_blkaddr = blkaddr;

		/* cluster header */
		if (i == 0) {
			if (blkaddr == COMPRESS_ADDR)
				fio.compr_blocks++;
			if (__is_valid_data_blkaddr(blkaddr))
				f2fs_invalidate_blocks(sbi, blkaddr);
			f2fs_update_data_blkaddr(&dn, COMPRESS_ADDR);
			goto unlock_continue;
		}

		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
			fio.compr_blocks++;

		if (i > cc->nr_cpages) {
			if (__is_valid_data_blkaddr(blkaddr)) {
				f2fs_invalidate_blocks(sbi, blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			}
			goto unlock_continue;
		}

		f2fs_bug_on(fio.sbi, blkaddr == NULL_ADDR);

		if (fio.encrypted)
			fio.encrypted_page = cc->cpages[i - 1];
		else
			fio.compressed_page = cc->cpages[i - 1];

		cc->cpages[i - 1] = NULL;
		f2fs_outplace_write_data(&dn, &fio);
		(*submitted)++;
unlock_continue:
		inode_dec_dirty_pages(cc->inode);
		unlock_page(fio.page);
	}

	if (fio.compr_blocks)
		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
	add_compr_block_stat(inode, cc->nr_cpages);

	set_inode_flag(cc->inode, FI_APPEND_WRITE);
	if (cc->cluster_idx == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	f2fs_put_dnode(&dn);
	if (IS_NOQUOTA(inode))
		f2fs_up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);

	spin_lock(&fi->i_size_lock);
	if (fi->last_disk_size < psize)
		fi->last_disk_size = psize;
	spin_unlock(&fi->i_size_lock);

	f2fs_put_rpages(cc);
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	f2fs_destroy_compress_ctx(cc, false);
	return 0;

out_destroy_crypt:
	page_array_free(cc->inode, cic->rpages, cc->cluster_size);

	for (--i; i >= 0; i--)
		fscrypt_finalize_bounce_page(&cc->cpages[i]);
out_put_cic:
	kmem_cache_free(cic_entry_slab, cic);
out_put_dnode:
	f2fs_put_dnode(&dn);
out_unlock_op:
	if (IS_NOQUOTA(inode))
		f2fs_up_read(&sbi->node_write);
	else
		f2fs_unlock_op(sbi);
out_free:
	for (i = 0; i < cc->nr_cpages; i++) {
		if (!cc->cpages[i])
			continue;
		f2fs_compress_free_page(cc->cpages[i]);
		cc->cpages[i] = NULL;
	}
	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
	cc->cpages = NULL;
	return -EAGAIN;
}

void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct compress_io_ctx *cic =
			(struct compress_io_ctx *)page_private(page);
	int i;

	if (unlikely(bio->bi_status))
		mapping_set_error(cic->inode->i_mapping, -EIO);

	f2fs_compress_free_page(page);

	dec_page_count(sbi, F2FS_WB_DATA);

	if (atomic_dec_return(&cic->pending_pages))
		return;

	for (i = 0; i < cic->nr_rpages; i++) {
		WARN_ON(!cic->rpages[i]);
		clear_page_private_gcing(cic->rpages[i]);
		end_page_writeback(cic->rpages[i]);
	}

	page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
	kmem_cache_free(cic_entry_slab, cic);
}

1390static int f2fs_write_raw_pages(struct compress_ctx *cc,
1391 int *submitted,
1392 struct writeback_control *wbc,
1393 enum iostat_type io_type)
1394{
1395 struct address_space *mapping = cc->inode->i_mapping;
Hyeong-Jun Kim39ad0582021-12-10 13:30:12 +09001396 int _submitted, compr_blocks, ret, i;
Chao Yu4c8ff702019-11-01 18:07:14 +08001397
1398 compr_blocks = f2fs_compressed_blocks(cc);
Hyeong-Jun Kim39ad0582021-12-10 13:30:12 +09001399
1400 for (i = 0; i < cc->cluster_size; i++) {
1401 if (!cc->rpages[i])
1402 continue;
1403
1404 redirty_page_for_writepage(wbc, cc->rpages[i]);
1405 unlock_page(cc->rpages[i]);
Chao Yu4c8ff702019-11-01 18:07:14 +08001406 }
1407
Hyeong-Jun Kim39ad0582021-12-10 13:30:12 +09001408 if (compr_blocks < 0)
1409 return compr_blocks;
1410
Chao Yu4c8ff702019-11-01 18:07:14 +08001411 for (i = 0; i < cc->cluster_size; i++) {
1412 if (!cc->rpages[i])
1413 continue;
1414retry_write:
Hyeong-Jun Kim39ad0582021-12-10 13:30:12 +09001415 lock_page(cc->rpages[i]);
1416
Chao Yu4c8ff702019-11-01 18:07:14 +08001417 if (cc->rpages[i]->mapping != mapping) {
Hyeong-Jun Kim39ad0582021-12-10 13:30:12 +09001418continue_unlock:
Chao Yu4c8ff702019-11-01 18:07:14 +08001419 unlock_page(cc->rpages[i]);
1420 continue;
1421 }
1422
Hyeong-Jun Kim39ad0582021-12-10 13:30:12 +09001423 if (!PageDirty(cc->rpages[i]))
1424 goto continue_unlock;
1425
1426 if (!clear_page_dirty_for_io(cc->rpages[i]))
1427 goto continue_unlock;
Chao Yu4c8ff702019-11-01 18:07:14 +08001428
1429 ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
1430 NULL, NULL, wbc, io_type,
Chao Yu77844182021-01-11 17:42:53 +08001431 compr_blocks, false);
Chao Yu4c8ff702019-11-01 18:07:14 +08001432 if (ret) {
1433 if (ret == AOP_WRITEPAGE_ACTIVATE) {
1434 unlock_page(cc->rpages[i]);
1435 ret = 0;
1436 } else if (ret == -EAGAIN) {
Chao Yu466357d2020-03-20 18:14:31 +08001437 /*
1438 * for quota file, just redirty left pages to
1439 * avoid deadlock caused by cluster update race
1440 * from foreground operation.
1441 */
Hyeong-Jun Kim39ad0582021-12-10 13:30:12 +09001442 if (IS_NOQUOTA(cc->inode))
1443 return 0;
Chao Yu4c8ff702019-11-01 18:07:14 +08001444 ret = 0;
1445 cond_resched();
Chao Yu5df7731f2020-02-17 17:45:44 +08001446 congestion_wait(BLK_RW_ASYNC,
1447 DEFAULT_IO_TIMEOUT);
Chao Yu4c8ff702019-11-01 18:07:14 +08001448 goto retry_write;
1449 }
Hyeong-Jun Kim39ad0582021-12-10 13:30:12 +09001450 return ret;
Chao Yu4c8ff702019-11-01 18:07:14 +08001451 }
1452
1453 *submitted += _submitted;
1454 }
Chao Yu77844182021-01-11 17:42:53 +08001455
1456 f2fs_balance_fs(F2FS_M_SB(mapping), true);
1457
Chao Yu4c8ff702019-11-01 18:07:14 +08001458 return 0;
Chao Yu4c8ff702019-11-01 18:07:14 +08001459}
1460
int f2fs_write_multi_pages(struct compress_ctx *cc,
					int *submitted,
					struct writeback_control *wbc,
					enum iostat_type io_type)
{
	int err;

	*submitted = 0;
	if (cluster_may_compress(cc)) {
		err = f2fs_compress_pages(cc);
		if (err == -EAGAIN) {
			goto write;
		} else if (err) {
			f2fs_put_rpages_wbc(cc, wbc, true, 1);
			goto destroy_out;
		}

		err = f2fs_write_compressed_pages(cc, submitted,
							wbc, io_type);
		if (!err)
			return 0;
		f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN);
	}
write:
	f2fs_bug_on(F2FS_I_SB(cc->inode), *submitted);

	err = f2fs_write_raw_pages(cc, submitted, wbc, io_type);
	f2fs_put_rpages_wbc(cc, wbc, false, 0);
destroy_out:
	f2fs_destroy_compress_ctx(cc, false);
	return err;
}

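/*
 * Lifetime of a decompress_io_ctx: it is allocated at read-submission
 * time and refcounted; the final f2fs_put_dic() (directly, or via
 * f2fs_put_page_dic() from a page that stashed the dic in its
 * page_private) frees it through f2fs_free_dic().
 */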
static void f2fs_free_dic(struct decompress_io_ctx *dic);

struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{
	struct decompress_io_ctx *dic;
	pgoff_t start_idx = start_idx_of_cluster(cc);
	int i;

	dic = kmem_cache_zalloc(dic_entry_slab, GFP_NOFS);
	if (!dic)
		return ERR_PTR(-ENOMEM);

	dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
	if (!dic->rpages) {
		kmem_cache_free(dic_entry_slab, dic);
		return ERR_PTR(-ENOMEM);
	}

	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
	dic->inode = cc->inode;
	atomic_set(&dic->remaining_pages, cc->nr_cpages);
	dic->cluster_idx = cc->cluster_idx;
	dic->cluster_size = cc->cluster_size;
	dic->log_cluster_size = cc->log_cluster_size;
	dic->nr_cpages = cc->nr_cpages;
	refcount_set(&dic->refcnt, 1);
	dic->failed = false;
	dic->need_verity = f2fs_need_verity(cc->inode, start_idx);

	for (i = 0; i < dic->cluster_size; i++)
		dic->rpages[i] = cc->rpages[i];
	dic->nr_rpages = cc->cluster_size;

	dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
	if (!dic->cpages)
		goto out_free;

	for (i = 0; i < dic->nr_cpages; i++) {
		struct page *page;

		page = f2fs_compress_alloc_page();
		if (!page)
			goto out_free;

		f2fs_set_compressed_page(page, cc->inode,
					start_idx + i + 1, dic);
		dic->cpages[i] = page;
	}

	return dic;

out_free:
	f2fs_free_dic(dic);
	return ERR_PTR(-ENOMEM);
}

static void f2fs_free_dic(struct decompress_io_ctx *dic)
{
	int i;

	if (dic->tpages) {
		for (i = 0; i < dic->cluster_size; i++) {
			if (dic->rpages[i])
				continue;
			if (!dic->tpages[i])
				continue;
			f2fs_compress_free_page(dic->tpages[i]);
		}
		page_array_free(dic->inode, dic->tpages, dic->cluster_size);
	}

	if (dic->cpages) {
		for (i = 0; i < dic->nr_cpages; i++) {
			if (!dic->cpages[i])
				continue;
			f2fs_compress_free_page(dic->cpages[i]);
		}
		page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
	}

	page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
	kmem_cache_free(dic_entry_slab, dic);
}

static void f2fs_put_dic(struct decompress_io_ctx *dic)
{
	if (refcount_dec_and_test(&dic->refcnt))
		f2fs_free_dic(dic);
}

/*
 * Update and unlock the cluster's pagecache pages, and release the reference to
 * the decompress_io_ctx that was being held for I/O completion.
 */
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	int i;

	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (!rpage)
			continue;

		/* PG_error was set if verity failed. */
		if (failed || PageError(rpage)) {
			ClearPageUptodate(rpage);
			/* will re-read again later */
			ClearPageError(rpage);
		} else {
			SetPageUptodate(rpage);
		}
		unlock_page(rpage);
	}

	f2fs_put_dic(dic);
}

static void f2fs_verify_cluster(struct work_struct *work)
{
	struct decompress_io_ctx *dic =
		container_of(work, struct decompress_io_ctx, verity_work);
	int i;

	/* Verify the cluster's decompressed pages with fs-verity. */
	for (i = 0; i < dic->cluster_size; i++) {
		struct page *rpage = dic->rpages[i];

		if (rpage && !fsverity_verify_page(rpage))
			SetPageError(rpage);
	}

	__f2fs_decompress_end_io(dic, false);
}

/*
 * This is called when a compressed cluster has been decompressed
 * (or failed to be read and/or decompressed).
 */
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
	if (!failed && dic->need_verity) {
		/*
		 * Note that to avoid deadlocks, the verity work can't be done
		 * on the decompression workqueue.  This is because verifying
		 * the data pages can involve reading metadata pages from the
		 * file, and these metadata pages may be compressed.
		 */
		INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
		fsverity_enqueue_verify_work(&dic->verity_work);
	} else {
		__f2fs_decompress_end_io(dic, failed);
	}
}

/*
 * Put a reference to a compressed page's decompress_io_ctx.
 *
 * This is called when the page is no longer needed and can be freed.
 */
void f2fs_put_page_dic(struct page *page)
{
	struct decompress_io_ctx *dic =
			(struct decompress_io_ctx *)page_private(page);

	f2fs_put_dic(dic);
}

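/*
 * What follows implements the compressed block cache behind the
 * "compress_cache" mount option: raw compressed blocks are kept in the
 * page cache of an internal inode (sbi->compress_inode), indexed by block
 * address, with the owning inode number stored in each page's private
 * data.  That indexing is what lets a single block be dropped with
 * invalidate_mapping_pages(mapping, blkaddr, blkaddr), and a whole file's
 * blocks be found again by comparing get_page_private_data() against its
 * ino.
 */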
const struct address_space_operations f2fs_compress_aops = {
	.releasepage = f2fs_release_page,
	.invalidatepage = f2fs_invalidate_page,
};

struct address_space *COMPRESS_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->compress_inode->i_mapping;
}

void f2fs_invalidate_compress_page(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	if (!sbi->compress_inode)
		return;
	invalidate_mapping_pages(COMPRESS_MAPPING(sbi), blkaddr, blkaddr);
}

void f2fs_cache_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
						nid_t ino, block_t blkaddr)
{
	struct page *cpage;
	int ret;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return;

	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		return;

	if (!f2fs_available_free_memory(sbi, COMPRESS_PAGE))
		return;

	cpage = find_get_page(COMPRESS_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_put_page(cpage, 0);
		return;
	}

	cpage = alloc_page(__GFP_NOWARN | __GFP_IO);
	if (!cpage)
		return;

	ret = add_to_page_cache_lru(cpage, COMPRESS_MAPPING(sbi),
						blkaddr, GFP_NOFS);
	if (ret) {
		f2fs_put_page(cpage, 0);
		return;
	}

	set_page_private_data(cpage, ino);

	/*
	 * Re-check the block address under the locked cache page: it may
	 * have been released or reallocated while the page was being set up.
	 */
	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE_READ))
		goto out;

	memcpy(page_address(cpage), page_address(page), PAGE_SIZE);
	SetPageUptodate(cpage);
out:
	f2fs_put_page(cpage, 1);
}

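/*
 * A minimal usage sketch for this cache (assumed caller behaviour, not a
 * verbatim call site from the read path):
 *
 *	if (!f2fs_load_compressed_page(sbi, page, blkaddr)) {
 *		... read the block at blkaddr from disk into page ...
 *		f2fs_cache_compressed_page(sbi, page, inode->i_ino, blkaddr);
 *	}
 *
 * Note that the lookup below uses FGP_LOCK | FGP_NOWAIT, so a contended
 * cache page is treated as a miss instead of blocking the reader.
 */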
bool f2fs_load_compressed_page(struct f2fs_sb_info *sbi, struct page *page,
								block_t blkaddr)
{
	struct page *cpage;
	bool hit = false;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return false;

	cpage = f2fs_pagecache_get_page(COMPRESS_MAPPING(sbi),
				blkaddr, FGP_LOCK | FGP_NOWAIT, GFP_NOFS);
	if (cpage) {
		if (PageUptodate(cpage)) {
			atomic_inc(&sbi->compress_page_hit);
			memcpy(page_address(page),
				page_address(cpage), PAGE_SIZE);
			hit = true;
		}
		f2fs_put_page(cpage, 1);
	}

	return hit;
}

void f2fs_invalidate_compress_pages(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = sbi->compress_inode->i_mapping;
	struct pagevec pvec;
	pgoff_t index = 0;
	pgoff_t end = MAX_BLKADDR(sbi);

	if (!mapping->nrpages)
		return;

	pagevec_init(&pvec);

	do {
		unsigned int nr_pages;
		int i;

		nr_pages = pagevec_lookup_range(&pvec, mapping,
						&index, end - 1);
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}

			if (ino != get_page_private_data(page)) {
				unlock_page(page);
				continue;
			}

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (index < end);
}

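/*
 * Set up the in-memory inode that hosts the compressed block cache.  The
 * percent/watermark fields initialised here are taken to be the
 * memory-pressure thresholds behind f2fs_available_free_memory(sbi,
 * COMPRESS_PAGE) in f2fs_cache_compressed_page() above; that linkage is an
 * inference from the call site, not something spelled out in this file.
 */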
int f2fs_init_compress_inode(struct f2fs_sb_info *sbi)
{
	struct inode *inode;

	if (!test_opt(sbi, COMPRESS_CACHE))
		return 0;

	inode = f2fs_iget(sbi->sb, F2FS_COMPRESS_INO(sbi));
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	sbi->compress_inode = inode;

	sbi->compress_percent = COMPRESS_PERCENT;
	sbi->compress_watermark = COMPRESS_WATERMARK;

	atomic_set(&sbi->compress_page_hit, 0);

	return 0;
}

void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
{
	if (!sbi->compress_inode)
		return;
	iput(sbi->compress_inode);
	sbi->compress_inode = NULL;
}

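/*
 * The per-device slab below holds one cluster's worth of page pointers.
 * As a worked example (illustrative numbers, not values mandated here):
 * with compress_log_size == 2, i.e. 4-page clusters, and 8-byte pointers,
 * each object is sizeof(struct page *) << 2 = 32 bytes.
 */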
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	char slab_name[32];

	snprintf(slab_name, sizeof(slab_name), "f2fs_page_array_entry-%u:%u",
					MAJOR(dev), MINOR(dev));

	sbi->page_array_slab_size = sizeof(struct page *) <<
					F2FS_OPTION(sbi).compress_log_size;

	sbi->page_array_slab = f2fs_kmem_cache_create(slab_name,
					sbi->page_array_slab_size);
	if (!sbi->page_array_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi)
{
	kmem_cache_destroy(sbi->page_array_slab);
}

static int __init f2fs_init_cic_cache(void)
{
	cic_entry_slab = f2fs_kmem_cache_create("f2fs_cic_entry",
					sizeof(struct compress_io_ctx));
	if (!cic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_cic_cache(void)
{
	kmem_cache_destroy(cic_entry_slab);
}

static int __init f2fs_init_dic_cache(void)
{
	dic_entry_slab = f2fs_kmem_cache_create("f2fs_dic_entry",
					sizeof(struct decompress_io_ctx));
	if (!dic_entry_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_dic_cache(void)
{
	kmem_cache_destroy(dic_entry_slab);
}

int __init f2fs_init_compress_cache(void)
{
	int err;

	err = f2fs_init_cic_cache();
	if (err)
		goto out;
	err = f2fs_init_dic_cache();
	if (err)
		goto free_cic;
	return 0;
free_cic:
	f2fs_destroy_cic_cache();
out:
	return -ENOMEM;
}

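/*
 * Teardown mirrors the init above in reverse order.  Both entry points are
 * assumed to be driven from the filesystem module's init/exit path
 * (f2fs_init_compress_cache() is __init); nothing here enforces the
 * pairing.
 */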
void f2fs_destroy_compress_cache(void)
{
	f2fs_destroy_dic_cache();
	f2fs_destroy_cic_cache();
}