// SPDX-License-Identifier: GPL-2.0
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"
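
/*
 * Decrypt, in place, the data in each segment of @bio (typically a bio whose
 * read from disk has just completed).  Each segment is decrypted via
 * fscrypt_decrypt_pagecache_blocks(); any page containing a block that fails
 * to decrypt is marked with PG_error.
 */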
void fscrypt_decrypt_bio(struct bio *bio)
{
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_pagecache_blocks(page, bv->bv_len,
							   bv->bv_offset);
		if (ret)
			SetPageError(page);
	}
}
EXPORT_SYMBOL(fscrypt_decrypt_bio);
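
/*
 * Zero-out path for inodes that use inline (blk-crypto) encryption: attach an
 * fscrypt crypt context to each bio and write ZERO_PAGE(0) data, so the zeroes
 * are encrypted on the way to disk by the inline crypto hardware (or the
 * blk-crypto fallback) rather than through software bounce pages.
 */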
static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode,
					      pgoff_t lblk, sector_t pblk,
					      unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits);
	struct bio *bio;
	int ret, err = 0;
	int num_pages = 0;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
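
	/*
	 * Fill the bio with up to BIO_MAX_VECS references to ZERO_PAGE(0),
	 * setting a fresh crypt context whenever a new bio is started.  The
	 * bio is submitted once it is full, the range is exhausted, or
	 * fscrypt_mergeable_bio() reports that the next block can no longer
	 * share this bio's crypt context.
	 */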
	while (len) {
		unsigned int blocks_this_page = min(len, blocks_per_page);
		unsigned int bytes_this_page = blocks_this_page << blockbits;

		if (num_pages == 0) {
			fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
			bio_set_dev(bio, inode->i_sb->s_bdev);
			bio->bi_iter.bi_sector =
					pblk << (blockbits - SECTOR_SHIFT);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		}
		ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0);
		if (WARN_ON(ret != bytes_this_page)) {
			err = -EIO;
			goto out;
		}
		num_pages++;
		len -= blocks_this_page;
		lblk += blocks_this_page;
		pblk += blocks_this_page;
		if (num_pages == BIO_MAX_VECS || !len ||
		    !fscrypt_mergeable_bio(bio, inode, lblk)) {
			err = submit_bio_wait(bio);
			if (err)
				goto out;
			bio_reset(bio);
			num_pages = 0;
		}
	}
out:
	bio_put(bio);
	return err;
}

/**
 * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file
 * @inode: the file's inode
 * @lblk: the first file logical block to zero out
 * @pblk: the first filesystem physical block to zero out
 * @len: number of blocks to zero out
 *
 * Zero out filesystem blocks in an encrypted regular file on-disk, i.e. write
 * ciphertext blocks which decrypt to the all-zeroes block.  The blocks must be
 * both logically and physically contiguous.  It's also assumed that the
 * filesystem only uses a single block device, ->s_bdev.
 *
 * Note that since each block uses a different IV, this involves writing a
 * different ciphertext to each block; we can't simply reuse the same one.
 *
 * Return: 0 on success; -errno on failure.
 */
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
			  sector_t pblk, unsigned int len)
{
	const unsigned int blockbits = inode->i_blkbits;
	const unsigned int blocksize = 1 << blockbits;
	const unsigned int blocks_per_page_bits = PAGE_SHIFT - blockbits;
	const unsigned int blocks_per_page = 1 << blocks_per_page_bits;
	struct page *pages[16]; /* write up to 16 pages at a time */
	unsigned int nr_pages;
	unsigned int i;
	unsigned int offset;
	struct bio *bio;
	int ret, err;

	if (len == 0)
		return 0;

	if (fscrypt_inode_uses_inline_crypto(inode))
		return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk,
							  len);

	BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_VECS);
	nr_pages = min_t(unsigned int, ARRAY_SIZE(pages),
			 (len + blocks_per_page - 1) >> blocks_per_page_bits);

	/*
	 * We need at least one page for ciphertext.  Allocate the first one
	 * from a mempool, with __GFP_DIRECT_RECLAIM set so that it can't fail.
	 *
	 * Any additional page allocations are allowed to fail, as they only
	 * help performance, and waiting on the mempool for them could deadlock.
	 */
	for (i = 0; i < nr_pages; i++) {
		pages[i] = fscrypt_alloc_bounce_page(i == 0 ? GFP_NOFS :
						     GFP_NOWAIT | __GFP_NOWARN);
		if (!pages[i])
			break;
	}
	nr_pages = i;
	if (WARN_ON(nr_pages <= 0))
		return -EINVAL;

	/* This always succeeds since __GFP_DIRECT_RECLAIM is set. */
	bio = bio_alloc(GFP_NOFS, nr_pages);
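
	/*
	 * Encrypt a batch of zero blocks into the bounce pages, add the full
	 * pages to the bio, and write them out; reuse the same bio and bounce
	 * pages for each batch until all @len blocks have been written.
	 */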
	do {
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector = pblk << (blockbits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		i = 0;
		offset = 0;
		do {
			err = fscrypt_crypt_block(inode, FS_ENCRYPT, lblk,
						  ZERO_PAGE(0), pages[i],
						  blocksize, offset, GFP_NOFS);
			if (err)
				goto out;
			lblk++;
			pblk++;
			len--;
			offset += blocksize;
			if (offset == PAGE_SIZE || len == 0) {
				ret = bio_add_page(bio, pages[i++], offset, 0);
				if (WARN_ON(ret != offset)) {
					err = -EIO;
					goto out;
				}
				offset = 0;
			}
		} while (i != nr_pages && len != 0);

		err = submit_bio_wait(bio);
		if (err)
			goto out;
		bio_reset(bio);
	} while (len != 0);
	err = 0;
out:
	bio_put(bio);
	for (i = 0; i < nr_pages; i++)
		fscrypt_free_bounce_page(pages[i]);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);