// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/block_dev.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include "internal.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

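/*
 * Write the bdev inode out now and keep retrying while writeback leaves
 * it dirty; failures are only logged (ratelimited) because callers on
 * the device put path have no good way to recover.
 */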
static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
				"for block device %s (err=%d).\n",
				bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not... */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

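/*
 * Unlike kill_bdev() above, which unconditionally drops everything
 * including dirty pages, invalidate_bdev() below only throws away what
 * is clean and unused.
 */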
/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious.
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for given bdev range. This function bails
 * out with an error if the bdev has another exclusive owner (such as a
 * filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
			loff_t lstart, loff_t lend)
{
	struct block_device *claimed_bdev = NULL;
	int err;

	/*
	 * If we don't hold an exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under a live filesystem.
	 */
	if (!(mode & FMODE_EXCL)) {
		claimed_bdev = bdev->bd_contains;
		err = bd_prepare_to_claim(bdev, claimed_bdev,
					  truncate_bdev_range);
		if (err)
			goto invalidate;
	}
	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (claimed_bdev)
		bd_abort_claiming(bdev, claimed_bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}
EXPORT_SYMBOL(truncate_bdev_range);
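
/*
 * Illustrative sketch (not part of this file): a discard-style caller
 * would typically drop cached pages for the byte range first, then issue
 * the discard, roughly:
 *
 *	error = truncate_bdev_range(bdev, mode, start, end);
 *	if (error)
 *		return error;
 *	error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
 *				     GFP_KERNEL, 0);
 */
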
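/*
 * Pick the largest power-of-two block size, from the logical block size
 * up to PAGE_SIZE, that still divides the device size: bsize is doubled
 * only while the matching bit of the size is clear, i.e. while the size
 * remains a multiple of the doubled value.
 */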
static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current size */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);
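
/*
 * Illustrative sketch (not part of this file): a caller wanting 4KiB
 * buffer-cache blocks on a device would do something like:
 *
 *	error = set_blocksize(bdev, 4096);
 *	if (error)
 *		return error;	(not a power of two in [512, PAGE_SIZE],
 *				 or below the device's logical block size)
 */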

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL_NS(sb_set_blocksize, ANDROID_GKI_VFS_EXPORT_ONLY);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL_NS(sb_min_blocksize, ANDROID_GKI_VFS_EXPORT_ONLY);
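
/*
 * Illustrative sketch (not part of this file): filesystems typically call
 * this from fill_super to get a usable initial block size, e.g.:
 *
 *	blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
 *	if (!blocksize)
 *		goto failed_mount;
 */
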
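/*
 * A block device maps 1:1 onto itself: block @iblock of the page cache
 * is block @iblock on the device, so there is nothing to look up or
 * allocate here and @create is ignored.
 */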
static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
	unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/*
	 * Avoid the need for an I/O completion work item: with REQ_FUA the
	 * data is durable once the write completes, so an O_DSYNC write
	 * needs no separate cache flush afterwards.
	 */
	if (iocb->ki_flags & IOCB_DSYNC)
		op |= REQ_FUA;
	return op;
}

#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
	struct task_struct *waiter = bio->bi_private;

	WRITE_ONCE(bio->bi_private, NULL);
	blk_wake_io_task(waiter);
}

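/*
 * Fast path for small synchronous direct I/O: a single bio, with the bio
 * vectors kept on the stack when they fit (DIO_INLINE_BIO_VECS), and the
 * submitting task sleeping (or polling) in place until completion.
 */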
static ssize_t
__blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
		int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;
	blk_qc_t qc;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	bio_init(&bio, vecs, nr_pages);
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = pos >> 9;
	bio.bi_write_hint = iocb->ki_hint;
	bio.bi_private = current;
	bio.bi_end_io = blkdev_bio_end_io_simple;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == READ) {
		bio.bi_opf = REQ_OP_READ;
		if (iter_is_iovec(iter))
			should_dirty = true;
	} else {
		bio.bi_opf = dio_bio_write_op(iocb);
		task_io_account_write(ret);
	}
	if (iocb->ki_flags & IOCB_NOWAIT)
		bio.bi_opf |= REQ_NOWAIT;
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(&bio, iocb);

	qc = submit_bio(&bio);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

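/*
 * Per-request state for the multi-bio direct I/O path below. The union
 * reflects the two completion models: a sync dio parks the submitter in
 * ->waiter, while an async dio completes ->iocb from the last bio's
 * end_io. The structure is embedded in the first bio, which must
 * therefore stay alive until the dio as a whole is finished.
 */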
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	bool			multi_bio : 1;
	bool			should_dirty : 1;
	bool			is_sync : 1;
	struct bio		bio;
};

static struct bio_set blkdev_dio_pool;

static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
{
	struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
	struct request_queue *q = bdev_get_queue(bdev);

	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
}

static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
		if (!dio->is_sync) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret, 0);
			if (dio->multi_bio)
				bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static ssize_t
__blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	loff_t pos = iocb->ki_pos;
	blk_qc_t qc = BLK_QC_T_NONE;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);

	dio = container_of(bio, struct blkdev_dio, bio);
	dio->is_sync = is_sync = is_sync_kiocb(iocb);
	if (dio->is_sync) {
		dio->waiter = current;
		bio_get(bio);
	} else {
		dio->iocb = iocb;
	}

	dio->size = 0;
	dio->multi_bio = false;
	dio->should_dirty = is_read && iter_is_iovec(iter);

	/*
	 * Don't plug for HIPRI/polled IO, as those should go straight
	 * to issue
	 */
	if (!is_poll)
		blk_start_plug(&plug);

	for (;;) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = pos >> 9;
		bio->bi_write_hint = iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}

		if (is_read) {
			bio->bi_opf = REQ_OP_READ;
			if (dio->should_dirty)
				bio_set_pages_dirty(bio);
		} else {
			bio->bi_opf = dio_bio_write_op(iocb);
			task_io_account_write(bio->bi_iter.bi_size);
		}
		if (iocb->ki_flags & IOCB_NOWAIT)
			bio->bi_opf |= REQ_NOWAIT;

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
		if (!nr_pages) {
			bool polled = false;

			if (iocb->ki_flags & IOCB_HIPRI) {
				bio_set_polled(bio, iocb);
				polled = true;
			}

			qc = submit_bio(bio);

			if (polled)
				WRITE_ONCE(iocb->ki_cookie, qc);
			break;
		}

		if (!dio->multi_bio) {
			/*
			 * AIO needs an extra reference to ensure the dio
			 * structure which is embedded into the first bio
			 * stays around.
			 */
			if (!is_sync)
				bio_get(bio);
			dio->multi_bio = true;
			atomic_set(&dio->ref, 2);
		} else {
			atomic_inc(&dio->ref);
		}

		submit_bio(bio);
		bio = bio_alloc(GFP_KERNEL, nr_pages);
	}

	if (!is_poll)
		blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;

		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

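/*
 * Dispatch: iov_iter_npages() is asked for BIO_MAX_PAGES + 1 so that
 * requests too large for a single bio can be told apart. Small
 * synchronous requests take the on-stack single-bio fast path above;
 * everything else goes through the bioset-backed multi-bio path.
 */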
static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	int nr_pages;

	nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES + 1);
	if (!nr_pages)
		return 0;
	if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

	return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
}

static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4,
			   offsetof(struct blkdev_dio, bio),
			   BIOSET_NEED_BVECS);
}
module_init(blkdev_init);

int __sync_blockdev(struct block_device *bdev, int wait)
{
	if (!bdev)
		return 0;
	if (!wait)
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping. Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device: filesystem data as well as the underlying block
 * device. Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev -- lock a filesystem and force it into a consistent state
 * @bdev: blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when
 * multiple freeze requests arrive simultaneously. It counts up in
 * freeze_bdev() and down in thaw_bdev(); when it reaches 0, thaw_bdev()
 * actually unfreezes.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev -- unlock filesystem
 * @bdev: blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
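
/*
 * Illustrative sketch (not part of this file): snapshot code brackets the
 * device copy with a freeze/thaw pair so the filesystem stays quiescent
 * and consistent while the image is taken:
 *
 *	error = freeze_bdev(bdev);
 *	if (error)
 *		return error;
 *	copy_device_image(bdev);	(hypothetical helper)
 *	thaw_bdev(bdev);
 */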
| 638 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 639 | static int blkdev_writepage(struct page *page, struct writeback_control *wbc) |
| 640 | { |
| 641 | return block_write_full_page(page, blkdev_get_block, wbc); |
| 642 | } |
| 643 | |
| 644 | static int blkdev_readpage(struct file * file, struct page * page) |
| 645 | { |
| 646 | return block_read_full_page(page, blkdev_get_block); |
| 647 | } |
| 648 | |
Matthew Wilcox (Oracle) | d438834 | 2020-06-01 21:47:02 -0700 | [diff] [blame] | 649 | static void blkdev_readahead(struct readahead_control *rac) |
Akinobu Mita | 447f05b | 2014-10-09 15:26:58 -0700 | [diff] [blame] | 650 | { |
Matthew Wilcox (Oracle) | d438834 | 2020-06-01 21:47:02 -0700 | [diff] [blame] | 651 | mpage_readahead(rac, blkdev_get_block); |
Akinobu Mita | 447f05b | 2014-10-09 15:26:58 -0700 | [diff] [blame] | 652 | } |
| 653 | |
Nick Piggin | 6272b5a | 2007-10-16 01:25:04 -0700 | [diff] [blame] | 654 | static int blkdev_write_begin(struct file *file, struct address_space *mapping, |
| 655 | loff_t pos, unsigned len, unsigned flags, |
| 656 | struct page **pagep, void **fsdata) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 657 | { |
Christoph Hellwig | 155130a | 2010-06-04 11:29:58 +0200 | [diff] [blame] | 658 | return block_write_begin(mapping, pos, len, flags, pagep, |
| 659 | blkdev_get_block); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 660 | } |
| 661 | |
Nick Piggin | 6272b5a | 2007-10-16 01:25:04 -0700 | [diff] [blame] | 662 | static int blkdev_write_end(struct file *file, struct address_space *mapping, |
| 663 | loff_t pos, unsigned len, unsigned copied, |
| 664 | struct page *page, void *fsdata) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 665 | { |
Nick Piggin | 6272b5a | 2007-10-16 01:25:04 -0700 | [diff] [blame] | 666 | int ret; |
| 667 | ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); |
| 668 | |
| 669 | unlock_page(page); |
Kirill A. Shutemov | 09cbfea | 2016-04-01 15:29:47 +0300 | [diff] [blame] | 670 | put_page(page); |
Nick Piggin | 6272b5a | 2007-10-16 01:25:04 -0700 | [diff] [blame] | 671 | |
| 672 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 673 | } |
| 674 | |
| 675 | /* |
| 676 | * private llseek: |
Al Viro | 496ad9a | 2013-01-23 17:07:38 -0500 | [diff] [blame] | 677 | * for a block special file file_inode(file)->i_size is zero |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 678 | * so we compute the size by hand (just as in block_read/write above) |
| 679 | */ |
Andrew Morton | 965c8e5 | 2012-12-17 15:59:39 -0800 | [diff] [blame] | 680 | static loff_t block_llseek(struct file *file, loff_t offset, int whence) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 681 | { |
Dan Williams | 4ebb16c | 2015-10-28 07:48:19 +0900 | [diff] [blame] | 682 | struct inode *bd_inode = bdev_file_inode(file); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 683 | loff_t retval; |
| 684 | |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 685 | inode_lock(bd_inode); |
Al Viro | 5d48f3a | 2013-06-23 21:34:45 +0400 | [diff] [blame] | 686 | retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode)); |
Al Viro | 5955102 | 2016-01-22 15:40:57 -0500 | [diff] [blame] | 687 | inode_unlock(bd_inode); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 688 | return retval; |
| 689 | } |
| 690 | |
Josef Bacik | 02c24a8 | 2011-07-16 20:44:56 -0400 | [diff] [blame] | 691 | int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 692 | { |
Dan Williams | 4ebb16c | 2015-10-28 07:48:19 +0900 | [diff] [blame] | 693 | struct inode *bd_inode = bdev_file_inode(filp); |
Anton Blanchard | b8af67e | 2010-04-23 13:18:06 -0400 | [diff] [blame] | 694 | struct block_device *bdev = I_BDEV(bd_inode); |
Christoph Hellwig | ab0a973 | 2009-10-29 14:14:04 +0100 | [diff] [blame] | 695 | int error; |
Rafael J. Wysocki | da5aa86 | 2011-08-02 02:17:48 +0200 | [diff] [blame] | 696 | |
Jeff Layton | 372cf24 | 2017-07-06 07:02:28 -0400 | [diff] [blame] | 697 | error = file_write_and_wait_range(filp, start, end); |
Rafael J. Wysocki | da5aa86 | 2011-08-02 02:17:48 +0200 | [diff] [blame] | 698 | if (error) |
| 699 | return error; |
Christoph Hellwig | ab0a973 | 2009-10-29 14:14:04 +0100 | [diff] [blame] | 700 | |
Anton Blanchard | b8af67e | 2010-04-23 13:18:06 -0400 | [diff] [blame] | 701 | /* |
| 702 | * There is no need to serialise calls to blkdev_issue_flush with |
| 703 | * i_mutex and doing so causes performance issues with concurrent |
| 704 | * O_SYNC writers to a block device. |
| 705 | */ |
Christoph Hellwig | 9398554 | 2020-05-13 14:36:00 +0200 | [diff] [blame] | 706 | error = blkdev_issue_flush(bdev, GFP_KERNEL); |
Christoph Hellwig | ab0a973 | 2009-10-29 14:14:04 +0100 | [diff] [blame] | 707 | if (error == -EOPNOTSUPP) |
| 708 | error = 0; |
Anton Blanchard | b8af67e | 2010-04-23 13:18:06 -0400 | [diff] [blame] | 709 | |
Christoph Hellwig | ab0a973 | 2009-10-29 14:14:04 +0100 | [diff] [blame] | 710 | return error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 711 | } |
Andrew Morton | b1dd3b2 | 2010-04-06 14:35:00 -0700 | [diff] [blame] | 712 | EXPORT_SYMBOL(blkdev_fsync); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 713 | |
Matthew Wilcox | 47a191f | 2014-06-04 16:07:46 -0700 | [diff] [blame] | 714 | /** |
| 715 | * bdev_read_page() - Start reading a page from a block device |
| 716 | * @bdev: The device to read the page from |
| 717 | * @sector: The offset on the device to read the page to (need not be aligned) |
| 718 | * @page: The page to read |
| 719 | * |
| 720 | * On entry, the page should be locked. It will be unlocked when the page |
| 721 | * has been read. If the block driver implements rw_page synchronously, |
| 722 | * that will be true on exit from this function, but it need not be. |
| 723 | * |
| 724 | * Errors returned by this function are usually "soft", eg out of memory, or |
| 725 | * queue full; callers should try a different route to read this page rather |
| 726 | * than propagate an error back up the stack. |
| 727 | * |
| 728 | * Return: negative errno if an error occurs, 0 if submission was successful. |
| 729 | */ |
| 730 | int bdev_read_page(struct block_device *bdev, sector_t sector, |
| 731 | struct page *page) |
| 732 | { |
| 733 | const struct block_device_operations *ops = bdev->bd_disk->fops; |
Dan Williams | 2e6edc95 | 2015-11-19 13:29:28 -0800 | [diff] [blame] | 734 | int result = -EOPNOTSUPP; |
| 735 | |
Vishal Verma | f68eb1e | 2015-05-12 13:48:53 -0400 | [diff] [blame] | 736 | if (!ops->rw_page || bdev_get_integrity(bdev)) |
Dan Williams | 2e6edc95 | 2015-11-19 13:29:28 -0800 | [diff] [blame] | 737 | return result; |
| 738 | |
Christoph Hellwig | e556f6b | 2020-06-26 10:01:56 +0200 | [diff] [blame] | 739 | result = blk_queue_enter(bdev->bd_disk->queue, 0); |
Dan Williams | 2e6edc95 | 2015-11-19 13:29:28 -0800 | [diff] [blame] | 740 | if (result) |
| 741 | return result; |
Tejun Heo | 3f289dc | 2018-07-18 04:47:36 -0700 | [diff] [blame] | 742 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, |
| 743 | REQ_OP_READ); |
Christoph Hellwig | e556f6b | 2020-06-26 10:01:56 +0200 | [diff] [blame] | 744 | blk_queue_exit(bdev->bd_disk->queue); |
Dan Williams | 2e6edc95 | 2015-11-19 13:29:28 -0800 | [diff] [blame] | 745 | return result; |
Matthew Wilcox | 47a191f | 2014-06-04 16:07:46 -0700 | [diff] [blame] | 746 | } |
Matthew Wilcox | 47a191f | 2014-06-04 16:07:46 -0700 | [diff] [blame] | 747 | |
| 748 | /** |
| 749 | * bdev_write_page() - Start writing a page to a block device |
| 750 | * @bdev: The device to write the page to |
| 751 | * @sector: The offset on the device to write the page to (need not be aligned) |
| 752 | * @page: The page to write |
| 753 | * @wbc: The writeback_control for the write |
| 754 | * |
| 755 | * On entry, the page should be locked and not currently under writeback. |
| 756 | * On exit, if the write started successfully, the page will be unlocked and |
| 757 | * under writeback. If the write failed already (eg the driver failed to |
| 758 | * queue the page to the device), the page will still be locked. If the |
| 759 | * caller is a ->writepage implementation, it will need to unlock the page. |
| 760 | * |
| 761 | * Errors returned by this function are usually "soft", eg out of memory, or |
| 762 | * queue full; callers should try a different route to write this page rather |
| 763 | * than propagate an error back up the stack. |
| 764 | * |
| 765 | * Return: negative errno if an error occurs, 0 if submission was successful. |
| 766 | */ |
| 767 | int bdev_write_page(struct block_device *bdev, sector_t sector, |
| 768 | struct page *page, struct writeback_control *wbc) |
| 769 | { |
| 770 | int result; |
Matthew Wilcox | 47a191f | 2014-06-04 16:07:46 -0700 | [diff] [blame] | 771 | const struct block_device_operations *ops = bdev->bd_disk->fops; |
Dan Williams | 2e6edc95 | 2015-11-19 13:29:28 -0800 | [diff] [blame] | 772 | |
Vishal Verma | f68eb1e | 2015-05-12 13:48:53 -0400 | [diff] [blame] | 773 | if (!ops->rw_page || bdev_get_integrity(bdev)) |
Matthew Wilcox | 47a191f | 2014-06-04 16:07:46 -0700 | [diff] [blame] | 774 | return -EOPNOTSUPP; |
Christoph Hellwig | e556f6b | 2020-06-26 10:01:56 +0200 | [diff] [blame] | 775 | result = blk_queue_enter(bdev->bd_disk->queue, 0); |
Dan Williams | 2e6edc95 | 2015-11-19 13:29:28 -0800 | [diff] [blame] | 776 | if (result) |
| 777 | return result; |
| 778 | |
Matthew Wilcox | 47a191f | 2014-06-04 16:07:46 -0700 | [diff] [blame] | 779 | set_page_writeback(page); |
Tejun Heo | 3f289dc | 2018-07-18 04:47:36 -0700 | [diff] [blame] | 780 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, |
| 781 | REQ_OP_WRITE); |
Matthew Wilcox | f892760 | 2017-10-13 15:58:15 -0700 | [diff] [blame] | 782 | if (result) { |
Matthew Wilcox | 47a191f | 2014-06-04 16:07:46 -0700 | [diff] [blame] | 783 | end_page_writeback(page); |
Matthew Wilcox | f892760 | 2017-10-13 15:58:15 -0700 | [diff] [blame] | 784 | } else { |
| 785 | clean_page_buffers(page); |
Matthew Wilcox | 47a191f | 2014-06-04 16:07:46 -0700 | [diff] [blame] | 786 | unlock_page(page); |
Matthew Wilcox | f892760 | 2017-10-13 15:58:15 -0700 | [diff] [blame] | 787 | } |
Christoph Hellwig | e556f6b | 2020-06-26 10:01:56 +0200 | [diff] [blame] | 788 | blk_queue_exit(bdev->bd_disk->queue); |
Matthew Wilcox | 47a191f | 2014-06-04 16:07:46 -0700 | [diff] [blame] | 789 | return result; |
| 790 | } |
Matthew Wilcox | 47a191f | 2014-06-04 16:07:46 -0700 | [diff] [blame] | 791 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 792 | /* |
| 793 | * pseudo-fs |
| 794 | */ |
| 795 | |
| 796 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); |
Christoph Lameter | e18b890 | 2006-12-06 20:33:20 -0800 | [diff] [blame] | 797 | static struct kmem_cache * bdev_cachep __read_mostly; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 798 | |
| 799 | static struct inode *bdev_alloc_inode(struct super_block *sb) |
| 800 | { |
Christoph Lameter | e94b176 | 2006-12-06 20:33:17 -0800 | [diff] [blame] | 801 | struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 802 | if (!ei) |
| 803 | return NULL; |
| 804 | return &ei->vfs_inode; |
| 805 | } |
| 806 | |
Al Viro | 41149cb | 2019-04-10 15:12:38 -0400 | [diff] [blame] | 807 | static void bdev_free_inode(struct inode *inode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 808 | { |
Al Viro | 41149cb | 2019-04-10 15:12:38 -0400 | [diff] [blame] | 809 | kmem_cache_free(bdev_cachep, BDEV_I(inode)); |
Nick Piggin | fa0d7e3d | 2011-01-07 17:49:49 +1100 | [diff] [blame] | 810 | } |
| 811 | |
Alexey Dobriyan | 51cc506 | 2008-07-25 19:45:34 -0700 | [diff] [blame] | 812 | static void init_once(void *foo) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 813 | { |
| 814 | struct bdev_inode *ei = (struct bdev_inode *) foo; |
| 815 | struct block_device *bdev = &ei->bdev; |
| 816 | |
Christoph Lameter | a35afb8 | 2007-05-16 22:10:57 -0700 | [diff] [blame] | 817 | memset(bdev, 0, sizeof(*bdev)); |
| 818 | mutex_init(&bdev->bd_mutex); |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 819 | #ifdef CONFIG_SYSFS |
| 820 | INIT_LIST_HEAD(&bdev->bd_holder_disks); |
| 821 | #endif |
Jan Kara | a5a79d0 | 2017-03-02 16:50:13 +0100 | [diff] [blame] | 822 | bdev->bd_bdi = &noop_backing_dev_info; |
Christoph Lameter | a35afb8 | 2007-05-16 22:10:57 -0700 | [diff] [blame] | 823 | inode_init_once(&ei->vfs_inode); |
Takashi Sato | fcccf50 | 2009-01-09 16:40:59 -0800 | [diff] [blame] | 824 | /* Initialize mutex for freeze. */ |
| 825 | mutex_init(&bdev->bd_fsfreeze_mutex); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 826 | } |
| 827 | |
Al Viro | b57922d | 2010-06-07 14:34:48 -0400 | [diff] [blame] | 828 | static void bdev_evict_inode(struct inode *inode) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 829 | { |
| 830 | struct block_device *bdev = &BDEV_I(inode)->bdev; |
Johannes Weiner | 91b0abe | 2014-04-03 14:47:49 -0700 | [diff] [blame] | 831 | truncate_inode_pages_final(&inode->i_data); |
Al Viro | b57922d | 2010-06-07 14:34:48 -0400 | [diff] [blame] | 832 | invalidate_inode_buffers(inode); /* is it needed here? */ |
Jan Kara | dbd5768 | 2012-05-03 14:48:02 +0200 | [diff] [blame] | 833 | clear_inode(inode); |
Jan Kara | f759741 | 2017-03-23 01:37:00 +0100 | [diff] [blame] | 834 | /* Detach inode from wb early as bdi_put() may free bdi->wb */ |
| 835 | inode_detach_wb(inode); |
Jan Kara | a5a79d0 | 2017-03-02 16:50:13 +0100 | [diff] [blame] | 836 | if (bdev->bd_bdi != &noop_backing_dev_info) { |
Jan Kara | b1d2dc56 | 2017-02-02 15:56:52 +0100 | [diff] [blame] | 837 | bdi_put(bdev->bd_bdi); |
Jan Kara | a5a79d0 | 2017-03-02 16:50:13 +0100 | [diff] [blame] | 838 | bdev->bd_bdi = &noop_backing_dev_info; |
| 839 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 840 | } |
| 841 | |
Josef 'Jeff' Sipek | ee9b6d6 | 2007-02-12 00:55:41 -0800 | [diff] [blame] | 842 | static const struct super_operations bdev_sops = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 843 | .statfs = simple_statfs, |
| 844 | .alloc_inode = bdev_alloc_inode, |
Al Viro | 41149cb | 2019-04-10 15:12:38 -0400 | [diff] [blame] | 845 | .free_inode = bdev_free_inode, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 846 | .drop_inode = generic_delete_inode, |
Al Viro | b57922d | 2010-06-07 14:34:48 -0400 | [diff] [blame] | 847 | .evict_inode = bdev_evict_inode, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 848 | }; |
| 849 | |
David Howells | 9030d16 | 2019-03-25 16:38:23 +0000 | [diff] [blame] | 850 | static int bd_init_fs_context(struct fs_context *fc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 851 | { |
David Howells | 9030d16 | 2019-03-25 16:38:23 +0000 | [diff] [blame] | 852 | struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC); |
| 853 | if (!ctx) |
| 854 | return -ENOMEM; |
| 855 | fc->s_iflags |= SB_I_CGROUPWB; |
| 856 | ctx->ops = &bdev_sops; |
| 857 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 858 | } |
| 859 | |
| 860 | static struct file_system_type bd_type = { |
| 861 | .name = "bdev", |
David Howells | 9030d16 | 2019-03-25 16:38:23 +0000 | [diff] [blame] | 862 | .init_fs_context = bd_init_fs_context, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 863 | .kill_sb = kill_anon_super, |
| 864 | }; |
| 865 | |
Tejun Heo | a212b10 | 2015-05-22 17:13:33 -0400 | [diff] [blame] | 866 | struct super_block *blockdev_superblock __read_mostly; |
| 867 | EXPORT_SYMBOL_GPL(blockdev_superblock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 868 | |
| 869 | void __init bdev_cache_init(void) |
| 870 | { |
| 871 | int err; |
Sergey Senozhatsky | ace8577 | 2012-01-10 02:43:59 +0300 | [diff] [blame] | 872 | static struct vfsmount *bd_mnt; |
Denis ChengRq | c2acf7b | 2008-12-01 14:34:56 -0800 | [diff] [blame] | 873 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 | bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode), |
Paul Jackson | fffb60f | 2006-03-24 03:16:06 -0800 | [diff] [blame] | 875 | 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| |
Vladimir Davydov | 5d09705 | 2016-01-14 15:18:21 -0800 | [diff] [blame] | 876 | SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC), |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 877 | init_once); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 878 | err = register_filesystem(&bd_type); |
| 879 | if (err) |
| 880 | panic("Cannot register bdev pseudo-fs"); |
| 881 | bd_mnt = kern_mount(&bd_type); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 882 | if (IS_ERR(bd_mnt)) |
| 883 | panic("Cannot create bdev pseudo-fs"); |
Sergey Senozhatsky | ace8577 | 2012-01-10 02:43:59 +0300 | [diff] [blame] | 884 | blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 885 | } |
| 886 | |
| 887 | /* |
| 888 | * Most likely _very_ bad one - but then it's hardly critical for small |
| 889 | * /dev and can be fixed when somebody will need really large one. |
| 890 | * Keep in mind that it will be fed through icache hash function too. |
| 891 | */ |
| 892 | static inline unsigned long hash(dev_t dev) |
| 893 | { |
| 894 | return MAJOR(dev)+MINOR(dev); |
| 895 | } |
| 896 | |
| 897 | static int bdev_test(struct inode *inode, void *data) |
| 898 | { |
| 899 | return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data; |
| 900 | } |
| 901 | |
| 902 | static int bdev_set(struct inode *inode, void *data) |
| 903 | { |
| 904 | BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data; |
| 905 | return 0; |
| 906 | } |
| 907 | |
Christoph Hellwig | 10ed166 | 2020-09-25 18:06:18 +0200 | [diff] [blame] | 908 | static struct block_device *bdget(dev_t dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 909 | { |
| 910 | struct block_device *bdev; |
| 911 | struct inode *inode; |
| 912 | |
Denis ChengRq | c2acf7b | 2008-12-01 14:34:56 -0800 | [diff] [blame] | 913 | inode = iget5_locked(blockdev_superblock, hash(dev), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 914 | bdev_test, bdev_set, &dev); |
| 915 | |
| 916 | if (!inode) |
| 917 | return NULL; |
| 918 | |
| 919 | bdev = &BDEV_I(inode)->bdev; |
| 920 | |
| 921 | if (inode->i_state & I_NEW) { |
Christoph Hellwig | c2b4bb8 | 2020-08-23 11:10:42 +0200 | [diff] [blame] | 922 | spin_lock_init(&bdev->bd_size_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 923 | bdev->bd_contains = NULL; |
Lachlan McIlroy | 782b94c | 2011-06-30 11:01:45 +1000 | [diff] [blame] | 924 | bdev->bd_super = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 925 | bdev->bd_inode = inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 926 | bdev->bd_part_count = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 927 | inode->i_mode = S_IFBLK; |
| 928 | inode->i_rdev = dev; |
| 929 | inode->i_bdev = bdev; |
| 930 | inode->i_data.a_ops = &def_blk_aops; |
| 931 | mapping_set_gfp_mask(&inode->i_data, GFP_USER); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 932 | unlock_new_inode(inode); |
| 933 | } |
| 934 | return bdev; |
| 935 | } |
| 936 | |
Alan Jenkins | dddac6a | 2009-07-29 21:07:55 +0200 | [diff] [blame] | 937 | /** |
| 938 | * bdgrab -- Grab a reference to an already referenced block device |
| 939 | * @bdev: Block device to grab a reference to. |
| 940 | */ |
| 941 | struct block_device *bdgrab(struct block_device *bdev) |
| 942 | { |
Al Viro | 7de9c6ee | 2010-10-23 11:11:40 -0400 | [diff] [blame] | 943 | ihold(bdev->bd_inode); |
Alan Jenkins | dddac6a | 2009-07-29 21:07:55 +0200 | [diff] [blame] | 944 | return bdev; |
| 945 | } |
Anatol Pomozov | c1681bf | 2013-04-01 09:47:56 -0700 | [diff] [blame] | 946 | EXPORT_SYMBOL(bdgrab); |
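/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that already holds a reference can take an extra one with bdgrab() and
 * drop it later with bdput(). Unlike bdget(), bdgrab() only bumps the
 * inode reference count via ihold(), so it neither sleeps nor fails:
 *
 *	struct block_device *extra = bdgrab(bdev);
 *
 *	... use extra ...
 *	bdput(extra);
 */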
Alan Jenkins | dddac6a | 2009-07-29 21:07:55 +0200 | [diff] [blame] | 947 | |
Christoph Hellwig | 10ed166 | 2020-09-25 18:06:18 +0200 | [diff] [blame] | 948 | struct block_device *bdget_part(struct hd_struct *part) |
| 949 | { |
| 950 | return bdget(part_devt(part)); |
| 951 | } |
| 952 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 953 | long nr_blockdev_pages(void) |
| 954 | { |
Christoph Hellwig | 1008fe6 | 2020-06-26 10:01:58 +0200 | [diff] [blame] | 955 | struct inode *inode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 956 | long ret = 0; |
Christoph Hellwig | 1008fe6 | 2020-06-26 10:01:58 +0200 | [diff] [blame] | 957 | |
| 958 | spin_lock(&blockdev_superblock->s_inode_list_lock); |
| 959 | list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) |
| 960 | ret += inode->i_mapping->nrpages; |
| 961 | spin_unlock(&blockdev_superblock->s_inode_list_lock); |
| 962 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 963 | return ret; |
| 964 | } |
| 965 | |
| 966 | void bdput(struct block_device *bdev) |
| 967 | { |
| 968 | iput(bdev->bd_inode); |
| 969 | } |
| 970 | |
| 971 | EXPORT_SYMBOL(bdput); |
| 972 | |
| 973 | static struct block_device *bd_acquire(struct inode *inode) |
| 974 | { |
| 975 | struct block_device *bdev; |
OGAWA Hirofumi | 09d967c | 2006-06-22 14:47:21 -0700 | [diff] [blame] | 976 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 977 | spin_lock(&bdev_lock); |
| 978 | bdev = inode->i_bdev; |
Jan Kara | cccd9fb | 2017-02-21 18:09:48 +0100 | [diff] [blame] | 979 | if (bdev && !inode_unhashed(bdev->bd_inode)) { |
Ilya Dryomov | ed8a9d2c | 2015-11-20 22:18:43 +0100 | [diff] [blame] | 980 | bdgrab(bdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 981 | spin_unlock(&bdev_lock); |
| 982 | return bdev; |
| 983 | } |
| 984 | spin_unlock(&bdev_lock); |
OGAWA Hirofumi | 09d967c | 2006-06-22 14:47:21 -0700 | [diff] [blame] | 985 | |
Jan Kara | cccd9fb | 2017-02-21 18:09:48 +0100 | [diff] [blame] | 986 | /* |
| 987 | * i_bdev references a block device inode that was already shut down
| 988 | * (the corresponding device got removed). Remove the reference and look
| 989 | * up the block device inode again in case a new device got
| 990 | * reestablished under the same device number.
| 991 | */ |
| 992 | if (bdev) |
| 993 | bd_forget(inode); |
| 994 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 995 | bdev = bdget(inode->i_rdev); |
| 996 | if (bdev) { |
| 997 | spin_lock(&bdev_lock); |
OGAWA Hirofumi | 09d967c | 2006-06-22 14:47:21 -0700 | [diff] [blame] | 998 | if (!inode->i_bdev) { |
| 999 | /* |
Al Viro | 7de9c6ee | 2010-10-23 11:11:40 -0400 | [diff] [blame] | 1000 | * We take an additional reference to bd_inode, |
OGAWA Hirofumi | 09d967c | 2006-06-22 14:47:21 -0700 | [diff] [blame] | 1001 | * and it's released in clear_inode() of the inode,
| 1002 | * so we can always access it via ->i_mapping
| 1003 | * without igrab().
| 1004 | */ |
Ilya Dryomov | ed8a9d2c | 2015-11-20 22:18:43 +0100 | [diff] [blame] | 1005 | bdgrab(bdev); |
OGAWA Hirofumi | 09d967c | 2006-06-22 14:47:21 -0700 | [diff] [blame] | 1006 | inode->i_bdev = bdev; |
| 1007 | inode->i_mapping = bdev->bd_inode->i_mapping; |
OGAWA Hirofumi | 09d967c | 2006-06-22 14:47:21 -0700 | [diff] [blame] | 1008 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1009 | spin_unlock(&bdev_lock); |
| 1010 | } |
| 1011 | return bdev; |
| 1012 | } |
| 1013 | |
| 1014 | /* Call when you free an inode */
| 1015 | |
| 1016 | void bd_forget(struct inode *inode) |
| 1017 | { |
OGAWA Hirofumi | 09d967c | 2006-06-22 14:47:21 -0700 | [diff] [blame] | 1018 | struct block_device *bdev = NULL; |
| 1019 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 | spin_lock(&bdev_lock); |
Yan Hong | b4ea2ea | 2013-04-30 15:26:47 -0700 | [diff] [blame] | 1021 | if (!sb_is_blkdev_sb(inode->i_sb)) |
| 1022 | bdev = inode->i_bdev; |
Al Viro | a4a4f94 | 2016-07-19 13:16:52 -0400 | [diff] [blame] | 1023 | inode->i_bdev = NULL; |
| 1024 | inode->i_mapping = &inode->i_data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1025 | spin_unlock(&bdev_lock); |
OGAWA Hirofumi | 09d967c | 2006-06-22 14:47:21 -0700 | [diff] [blame] | 1026 | |
| 1027 | if (bdev) |
Ilya Dryomov | ed8a9d2c | 2015-11-20 22:18:43 +0100 | [diff] [blame] | 1028 | bdput(bdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1029 | } |
| 1030 | |
Tejun Heo | 1a3cbbc | 2010-04-07 18:52:29 +0900 | [diff] [blame] | 1031 | /** |
| 1032 | * bd_may_claim - test whether a block device can be claimed |
| 1033 | * @bdev: block device of interest |
| 1034 | * @whole: whole block device containing @bdev, may equal @bdev |
| 1035 | * @holder: holder trying to claim @bdev |
| 1036 | * |
Lucas De Marchi | 25985ed | 2011-03-30 22:57:33 -0300 | [diff] [blame] | 1037 | * Test whether @bdev can be claimed by @holder. |
Tejun Heo | 1a3cbbc | 2010-04-07 18:52:29 +0900 | [diff] [blame] | 1038 | * |
| 1039 | * CONTEXT: |
| 1040 | * spin_lock(&bdev_lock). |
| 1041 | * |
| 1042 | * RETURNS: |
| 1043 | * %true if @bdev can be claimed, %false otherwise. |
| 1044 | */ |
| 1045 | static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, |
| 1046 | void *holder) |
| 1047 | { |
| 1048 | if (bdev->bd_holder == holder) |
| 1049 | return true; /* already a holder */ |
| 1050 | else if (bdev->bd_holder != NULL) |
| 1051 | return false; /* held by someone else */ |
NeilBrown | bcc7f5b | 2016-12-12 08:21:51 -0700 | [diff] [blame] | 1052 | else if (whole == bdev) |
Tejun Heo | 1a3cbbc | 2010-04-07 18:52:29 +0900 | [diff] [blame] | 1053 | return true; /* is a whole device which isn't held */ |
| 1054 | |
Tejun Heo | e525fd8 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1055 | else if (whole->bd_holder == bd_may_claim) |
Tejun Heo | 1a3cbbc | 2010-04-07 18:52:29 +0900 | [diff] [blame] | 1056 | return true; /* is a partition of a device that is being partitioned */ |
| 1057 | else if (whole->bd_holder != NULL) |
| 1058 | return false; /* is a partition of a held device */ |
| 1059 | else |
| 1060 | return true; /* is a partition of an un-held device */ |
| 1061 | } |
| 1062 | |
| 1063 | /** |
Christoph Hellwig | 58e46ed | 2020-07-16 16:33:08 +0200 | [diff] [blame] | 1064 | * bd_prepare_to_claim - claim a block device |
Tejun Heo | 6b4517a | 2010-04-07 18:53:59 +0900 | [diff] [blame] | 1065 | * @bdev: block device of interest |
| 1066 | * @whole: the whole device containing @bdev, may equal @bdev |
| 1067 | * @holder: holder trying to claim @bdev |
| 1068 | * |
Christoph Hellwig | 58e46ed | 2020-07-16 16:33:08 +0200 | [diff] [blame] | 1069 | * Claim @bdev. This function fails if @bdev is already claimed by another |
| 1070 | * holder and waits if another claiming is in progress. On successful return,
| 1071 | * the caller has ownership of bd_claiming and bd_holder[s].
Tejun Heo | 6b4517a | 2010-04-07 18:53:59 +0900 | [diff] [blame] | 1072 | * |
| 1073 | * RETURNS: |
| 1074 | * 0 if @bdev can be claimed, -EBUSY otherwise. |
| 1075 | */ |
Christoph Hellwig | ecbe6bc | 2020-07-16 16:33:09 +0200 | [diff] [blame] | 1076 | int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole, |
| 1077 | void *holder) |
Tejun Heo | 6b4517a | 2010-04-07 18:53:59 +0900 | [diff] [blame] | 1078 | { |
| 1079 | retry: |
Christoph Hellwig | 58e46ed | 2020-07-16 16:33:08 +0200 | [diff] [blame] | 1080 | spin_lock(&bdev_lock); |
Tejun Heo | 6b4517a | 2010-04-07 18:53:59 +0900 | [diff] [blame] | 1081 | /* if someone else claimed, fail */ |
Christoph Hellwig | 58e46ed | 2020-07-16 16:33:08 +0200 | [diff] [blame] | 1082 | if (!bd_may_claim(bdev, whole, holder)) { |
| 1083 | spin_unlock(&bdev_lock); |
Tejun Heo | 6b4517a | 2010-04-07 18:53:59 +0900 | [diff] [blame] | 1084 | return -EBUSY; |
Christoph Hellwig | 58e46ed | 2020-07-16 16:33:08 +0200 | [diff] [blame] | 1085 | } |
Tejun Heo | 6b4517a | 2010-04-07 18:53:59 +0900 | [diff] [blame] | 1086 | |
Tejun Heo | e75aa85 | 2010-08-04 17:59:39 +0200 | [diff] [blame] | 1087 | /* if claiming is already in progress, wait for it to finish */ |
| 1088 | if (whole->bd_claiming) { |
Tejun Heo | 6b4517a | 2010-04-07 18:53:59 +0900 | [diff] [blame] | 1089 | wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0); |
| 1090 | DEFINE_WAIT(wait); |
| 1091 | |
| 1092 | prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); |
| 1093 | spin_unlock(&bdev_lock); |
| 1094 | schedule(); |
| 1095 | finish_wait(wq, &wait); |
Tejun Heo | 6b4517a | 2010-04-07 18:53:59 +0900 | [diff] [blame] | 1096 | goto retry; |
| 1097 | } |
| 1098 | |
| 1099 | /* yay, all mine */ |
Christoph Hellwig | 58e46ed | 2020-07-16 16:33:08 +0200 | [diff] [blame] | 1100 | whole->bd_claiming = holder; |
| 1101 | spin_unlock(&bdev_lock); |
Tejun Heo | 6b4517a | 2010-04-07 18:53:59 +0900 | [diff] [blame] | 1102 | return 0; |
| 1103 | } |
Christoph Hellwig | ecbe6bc | 2020-07-16 16:33:09 +0200 | [diff] [blame] | 1104 | EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */ |
Tejun Heo | 6b4517a | 2010-04-07 18:53:59 +0900 | [diff] [blame] | 1105 | |
Jan Kara | 560e7cb | 2018-02-26 13:01:42 +0100 | [diff] [blame] | 1106 | static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno) |
| 1107 | { |
| 1108 | struct gendisk *disk = get_gendisk(bdev->bd_dev, partno); |
| 1109 | |
| 1110 | if (!disk) |
| 1111 | return NULL; |
| 1112 | /* |
| 1113 | * Now that we hold a gendisk reference, we make sure the bdev we looked
| 1114 | * up is not stale. If it is, the device was removed and recreated before
| 1115 | * we looked up the gendisk, and we fail the open in that case. Associating
| 1116 | * an unhashed bdev with a newly created gendisk could lead to two bdevs
| 1117 | * (and thus two independent caches) being associated with one device,
| 1118 | * which is bad.
| 1119 | */ |
| 1120 | if (inode_unhashed(bdev->bd_inode)) { |
| 1121 | put_disk_and_module(disk); |
| 1122 | return NULL; |
| 1123 | } |
| 1124 | return disk; |
| 1125 | } |
| 1126 | |
Jan Kara | 89e524c0 | 2019-07-30 13:10:14 +0200 | [diff] [blame] | 1127 | static void bd_clear_claiming(struct block_device *whole, void *holder) |
| 1128 | { |
| 1129 | lockdep_assert_held(&bdev_lock); |
| 1130 | /* tell others that we're done */ |
| 1131 | BUG_ON(whole->bd_claiming != holder); |
| 1132 | whole->bd_claiming = NULL; |
| 1133 | wake_up_bit(&whole->bd_claiming, 0); |
| 1134 | } |
| 1135 | |
| 1136 | /** |
| 1137 | * bd_finish_claiming - finish claiming of a block device |
| 1138 | * @bdev: block device of interest |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1139 | * @whole: whole block device |
Jan Kara | 89e524c0 | 2019-07-30 13:10:14 +0200 | [diff] [blame] | 1140 | * @holder: holder that has claimed @bdev |
| 1141 | * |
| 1142 | * Finish exclusive open of a block device. Mark the device as exclusively
| 1143 | * open by the holder and wake up all waiters for exclusive open to finish. |
| 1144 | */ |
Christoph Hellwig | 764b23b | 2020-06-20 09:16:36 +0200 | [diff] [blame] | 1145 | static void bd_finish_claiming(struct block_device *bdev, |
| 1146 | struct block_device *whole, void *holder) |
Jan Kara | 89e524c0 | 2019-07-30 13:10:14 +0200 | [diff] [blame] | 1147 | { |
| 1148 | spin_lock(&bdev_lock); |
| 1149 | BUG_ON(!bd_may_claim(bdev, whole, holder)); |
| 1150 | /* |
| 1151 | * Note that for a whole device bd_holders will be incremented twice, |
| 1152 | * and bd_holder will be set to bd_may_claim before being set to holder |
| 1153 | */ |
| 1154 | whole->bd_holders++; |
| 1155 | whole->bd_holder = bd_may_claim; |
| 1156 | bdev->bd_holders++; |
| 1157 | bdev->bd_holder = holder; |
| 1158 | bd_clear_claiming(whole, holder); |
| 1159 | spin_unlock(&bdev_lock); |
| 1160 | } |
Jan Kara | 89e524c0 | 2019-07-30 13:10:14 +0200 | [diff] [blame] | 1161 | |
| 1162 | /** |
| 1163 | * bd_abort_claiming - abort claiming of a block device |
| 1164 | * @bdev: block device of interest |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1165 | * @whole: whole block device |
Jan Kara | 89e524c0 | 2019-07-30 13:10:14 +0200 | [diff] [blame] | 1166 | * @holder: holder that has claimed @bdev |
| 1167 | * |
| 1168 | * Abort claiming of a block device when the exclusive open failed. This can be |
| 1169 | * also used when exclusive open is not actually desired and we just needed |
| 1170 | * to block other exclusive openers for a while. |
| 1171 | */ |
| 1172 | void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, |
| 1173 | void *holder) |
| 1174 | { |
| 1175 | spin_lock(&bdev_lock); |
| 1176 | bd_clear_claiming(whole, holder); |
| 1177 | spin_unlock(&bdev_lock); |
| 1178 | } |
| 1179 | EXPORT_SYMBOL(bd_abort_claiming); |
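/*
 * Claim lifecycle sketch (illustrative, not part of the original file):
 * a caller that only needs to block other exclusive openers for a while,
 * as described above for bd_abort_claiming(), pairs the two exported
 * calls; holder is any cookie identifying the claimant:
 *
 *	err = bd_prepare_to_claim(bdev, whole, holder);
 *	if (err)
 *		return err;
 *	... work that must not race with exclusive opens ...
 *	bd_abort_claiming(bdev, whole, holder);
 *
 * A successful exclusive open instead completes the claim through the
 * static helper bd_finish_claiming(), as __blkdev_get() does below.
 */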
Tejun Heo | 6b4517a | 2010-04-07 18:53:59 +0900 | [diff] [blame] | 1180 | |
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1181 | #ifdef CONFIG_SYSFS |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1182 | struct bd_holder_disk { |
| 1183 | struct list_head list; |
| 1184 | struct gendisk *disk; |
| 1185 | int refcnt; |
| 1186 | }; |
| 1187 | |
| 1188 | static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev, |
| 1189 | struct gendisk *disk) |
| 1190 | { |
| 1191 | struct bd_holder_disk *holder; |
| 1192 | |
| 1193 | list_for_each_entry(holder, &bdev->bd_holder_disks, list) |
| 1194 | if (holder->disk == disk) |
| 1195 | return holder; |
| 1196 | return NULL; |
| 1197 | } |
| 1198 | |
Andrew Morton | 4d7dd8fd | 2006-09-29 01:58:56 -0700 | [diff] [blame] | 1199 | static int add_symlink(struct kobject *from, struct kobject *to) |
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1200 | { |
Andrew Morton | 4d7dd8fd | 2006-09-29 01:58:56 -0700 | [diff] [blame] | 1201 | return sysfs_create_link(from, to, kobject_name(to)); |
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1202 | } |
| 1203 | |
| 1204 | static void del_symlink(struct kobject *from, struct kobject *to) |
| 1205 | { |
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1206 | sysfs_remove_link(from, kobject_name(to)); |
| 1207 | } |
| 1208 | |
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1209 | /** |
Tejun Heo | e09b457 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1210 | * bd_link_disk_holder - create symlinks between holding disk and slave bdev |
| 1211 | * @bdev: the claimed slave bdev |
| 1212 | * @disk: the holding disk |
Jun'ichi Nomura | df6c0cd | 2006-10-30 16:23:56 -0500 | [diff] [blame] | 1213 | * |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1214 | * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT. |
| 1215 | * |
Tejun Heo | e09b457 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1216 | * This function creates the following sysfs symlinks.
Jun'ichi Nomura | df6c0cd | 2006-10-30 16:23:56 -0500 | [diff] [blame] | 1217 | * |
Tejun Heo | e09b457 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1218 | * - from "slaves" directory of the holder @disk to the claimed @bdev |
| 1219 | * - from "holders" directory of the @bdev to the holder @disk |
| 1220 | * |
| 1221 | * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is |
| 1222 | * passed to bd_link_disk_holder(), then: |
| 1223 | * |
| 1224 | * /sys/block/dm-0/slaves/sda --> /sys/block/sda |
| 1225 | * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 |
| 1226 | * |
| 1227 | * The caller must have claimed @bdev before calling this function and |
| 1228 | * ensure that both @bdev and @disk are valid during the creation and |
| 1229 | * lifetime of these symlinks. |
| 1230 | * |
| 1231 | * CONTEXT: |
| 1232 | * Might sleep. |
| 1233 | * |
| 1234 | * RETURNS: |
| 1235 | * 0 on success, -errno on failure. |
Jun'ichi Nomura | df6c0cd | 2006-10-30 16:23:56 -0500 | [diff] [blame] | 1236 | */ |
Tejun Heo | e09b457 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1237 | int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) |
Jun'ichi Nomura | df6c0cd | 2006-10-30 16:23:56 -0500 | [diff] [blame] | 1238 | { |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1239 | struct bd_holder_disk *holder; |
Tejun Heo | e09b457 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1240 | int ret = 0; |
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1241 | |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1242 | mutex_lock(&bdev->bd_mutex); |
Jun'ichi Nomura | df6c0cd | 2006-10-30 16:23:56 -0500 | [diff] [blame] | 1243 | |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1244 | WARN_ON_ONCE(!bdev->bd_holder); |
Johannes Weiner | 4e91672 | 2007-07-15 23:41:25 -0700 | [diff] [blame] | 1245 | |
Tejun Heo | e09b457 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1246 | /* FIXME: remove the following once add_disk() handles errors */ |
| 1247 | if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir)) |
| 1248 | goto out_unlock; |
Johannes Weiner | 4e91672 | 2007-07-15 23:41:25 -0700 | [diff] [blame] | 1249 | |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1250 | holder = bd_find_holder_disk(bdev, disk); |
| 1251 | if (holder) { |
| 1252 | holder->refcnt++; |
Tejun Heo | e09b457 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1253 | goto out_unlock; |
| 1254 | } |
| 1255 | |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1256 | holder = kzalloc(sizeof(*holder), GFP_KERNEL); |
| 1257 | if (!holder) { |
| 1258 | ret = -ENOMEM; |
| 1259 | goto out_unlock; |
| 1260 | } |
| 1261 | |
| 1262 | INIT_LIST_HEAD(&holder->list); |
| 1263 | holder->disk = disk; |
| 1264 | holder->refcnt = 1; |
| 1265 | |
| 1266 | ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); |
| 1267 | if (ret) |
| 1268 | goto out_free; |
| 1269 | |
| 1270 | ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); |
| 1271 | if (ret) |
| 1272 | goto out_del; |
Tejun Heo | e7407d1 | 2011-02-24 09:56:32 +0100 | [diff] [blame] | 1273 | /* |
| 1274 | * bdev could be deleted beneath us which would implicitly destroy |
| 1275 | * the holder directory. Hold on to it. |
| 1276 | */ |
| 1277 | kobject_get(bdev->bd_part->holder_dir); |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1278 | |
| 1279 | list_add(&holder->list, &bdev->bd_holder_disks); |
| 1280 | goto out_unlock; |
| 1281 | |
| 1282 | out_del: |
| 1283 | del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); |
| 1284 | out_free: |
| 1285 | kfree(holder); |
Tejun Heo | e09b457 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1286 | out_unlock: |
Jun'ichi Nomura | b4cf1b7 | 2006-03-27 01:18:00 -0800 | [diff] [blame] | 1287 | mutex_unlock(&bdev->bd_mutex); |
Tejun Heo | e09b457 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1288 | return ret; |
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1289 | } |
Tejun Heo | e09b457 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1290 | EXPORT_SYMBOL_GPL(bd_link_disk_holder); |
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1291 | |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1292 | /** |
| 1293 | * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder() |
| 1294 | * @bdev: the claimed slave bdev
| 1295 | * @disk: the holding disk |
| 1296 | * |
| 1297 | * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT. |
| 1298 | * |
| 1299 | * CONTEXT: |
| 1300 | * Might sleep. |
| 1301 | */ |
| 1302 | void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) |
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1303 | { |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1304 | struct bd_holder_disk *holder; |
Tejun Heo | e09b457 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1305 | |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1306 | mutex_lock(&bdev->bd_mutex); |
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1307 | |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1308 | holder = bd_find_holder_disk(bdev, disk); |
| 1309 | |
| 1310 | if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) { |
| 1311 | del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); |
| 1312 | del_symlink(bdev->bd_part->holder_dir, |
| 1313 | &disk_to_dev(disk)->kobj); |
Tejun Heo | e7407d1 | 2011-02-24 09:56:32 +0100 | [diff] [blame] | 1314 | kobject_put(bdev->bd_part->holder_dir); |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1315 | list_del_init(&holder->list); |
| 1316 | kfree(holder); |
| 1317 | } |
| 1318 | |
| 1319 | mutex_unlock(&bdev->bd_mutex); |
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1320 | } |
Tejun Heo | 49731ba | 2011-01-14 18:43:57 +0100 | [diff] [blame] | 1321 | EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); |
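/*
 * Pairing sketch (illustrative, not part of the original file): a
 * stacking driver such as dm that has already claimed @bdev creates the
 * holder links at setup time and removes them with the matching unlink
 * call before dropping the claim; dm_disk is a hypothetical gendisk of
 * the holding device. Both calls may sleep and take bd_mutex internally:
 *
 *	err = bd_link_disk_holder(bdev, dm_disk);
 *	if (err)
 *		goto fail;
 *	...
 *	bd_unlink_disk_holder(bdev, dm_disk);
 */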
Jun'ichi Nomura | 641dc63 | 2006-03-27 01:17:57 -0800 | [diff] [blame] | 1322 | #endif |
| 1323 | |
Andrew Patterson | 0c002c2 | 2008-09-04 14:27:20 -0600 | [diff] [blame] | 1324 | /** |
Randy Dunlap | 57d1b53 | 2008-10-09 10:42:38 +0200 | [diff] [blame] | 1325 | * check_disk_size_change - checks for disk size change and adjusts bdev size. |
Andrew Patterson | c3279d1 | 2008-09-04 14:27:25 -0600 | [diff] [blame] | 1326 | * @disk: struct gendisk to check |
| 1327 | * @bdev: struct bdev to adjust. |
Christoph Hellwig | 5afb783 | 2018-05-29 16:42:59 +0200 | [diff] [blame] | 1328 | * @verbose: if %true log a message about a size change if there is any |
Andrew Patterson | c3279d1 | 2008-09-04 14:27:25 -0600 | [diff] [blame] | 1329 | * |
| 1330 | * This routine checks whether the bdev size matches the disk size
shunki-fujita | 849cf55 | 2018-04-05 16:20:07 -0700 | [diff] [blame] | 1331 | * and adjusts it if it differs. When shrinking the bdev size, all of its
| 1332 | * caches are freed.
Andrew Patterson | c3279d1 | 2008-09-04 14:27:25 -0600 | [diff] [blame] | 1333 | */ |
Christoph Hellwig | a1548b6 | 2019-11-14 15:34:34 +0100 | [diff] [blame] | 1334 | static void check_disk_size_change(struct gendisk *disk, |
| 1335 | struct block_device *bdev, bool verbose) |
Andrew Patterson | c3279d1 | 2008-09-04 14:27:25 -0600 | [diff] [blame] | 1336 | { |
| 1337 | loff_t disk_size, bdev_size; |
| 1338 | |
Christoph Hellwig | c2b4bb8 | 2020-08-23 11:10:42 +0200 | [diff] [blame] | 1339 | spin_lock(&bdev->bd_size_lock); |
Andrew Patterson | c3279d1 | 2008-09-04 14:27:25 -0600 | [diff] [blame] | 1340 | disk_size = (loff_t)get_capacity(disk) << 9; |
| 1341 | bdev_size = i_size_read(bdev->bd_inode); |
| 1342 | if (disk_size != bdev_size) { |
Christoph Hellwig | 5afb783 | 2018-05-29 16:42:59 +0200 | [diff] [blame] | 1343 | if (verbose) { |
| 1344 | printk(KERN_INFO |
| 1345 | "%s: detected capacity change from %lld to %lld\n", |
| 1346 | disk->disk_name, bdev_size, disk_size); |
| 1347 | } |
Andrew Patterson | c3279d1 | 2008-09-04 14:27:25 -0600 | [diff] [blame] | 1348 | i_size_write(bdev->bd_inode, disk_size); |
Christoph Hellwig | c2b4bb8 | 2020-08-23 11:10:42 +0200 | [diff] [blame] | 1349 | } |
Christoph Hellwig | c2b4bb8 | 2020-08-23 11:10:42 +0200 | [diff] [blame] | 1350 | spin_unlock(&bdev->bd_size_lock); |
| 1351 | |
| 1352 | if (bdev_size > disk_size) { |
| 1353 | if (__invalidate_device(bdev, false)) |
Christoph Hellwig | 9a3ffbb | 2020-07-08 14:25:43 +0200 | [diff] [blame] | 1354 | pr_warn("VFS: busy inodes on resized disk %s\n", |
| 1355 | disk->disk_name); |
Andrew Patterson | c3279d1 | 2008-09-04 14:27:25 -0600 | [diff] [blame] | 1356 | } |
| 1357 | } |
Andrew Patterson | c3279d1 | 2008-09-04 14:27:25 -0600 | [diff] [blame] | 1358 | |
| 1359 | /** |
Christoph Hellwig | 659e56b | 2020-09-01 17:57:43 +0200 | [diff] [blame] | 1360 | * revalidate_disk_size - checks for disk size change and adjusts bdev size. |
| 1361 | * @disk: struct gendisk to check |
| 1362 | * @verbose: if %true log a message about a size change if there is any |
| 1363 | * |
| 1364 | * This routine checks whether the bdev size matches the disk size
| 1365 | * and adjusts it if it differs. When shrinking the bdev size, all of its
| 1366 | * caches are freed.
| 1367 | */ |
| 1368 | void revalidate_disk_size(struct gendisk *disk, bool verbose) |
| 1369 | { |
| 1370 | struct block_device *bdev; |
| 1371 | |
| 1372 | /* |
| 1373 | * Hidden disks don't have associated bdev so there's no point in |
| 1374 | * revalidating them. |
| 1375 | */ |
| 1376 | if (disk->flags & GENHD_FL_HIDDEN) |
| 1377 | return; |
| 1378 | |
| 1379 | bdev = bdget_disk(disk, 0); |
| 1380 | if (bdev) { |
| 1381 | check_disk_size_change(disk, bdev, verbose); |
| 1382 | bdput(bdev); |
| 1383 | } |
| 1384 | } |
| 1385 | EXPORT_SYMBOL(revalidate_disk_size); |
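/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * that learns of a new device capacity updates the gendisk first and
 * then lets the bdev size follow; new_sectors is a hypothetical sector
 * count, and passing true logs the detected change:
 *
 *	set_capacity(disk, new_sectors);
 *	revalidate_disk_size(disk, true);
 */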
| 1386 | |
Christoph Hellwig | 611bee5 | 2020-08-23 11:10:41 +0200 | [diff] [blame] | 1387 | void bd_set_nr_sectors(struct block_device *bdev, sector_t sectors) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1388 | { |
Christoph Hellwig | c2b4bb8 | 2020-08-23 11:10:42 +0200 | [diff] [blame] | 1389 | spin_lock(&bdev->bd_size_lock); |
Christoph Hellwig | 611bee5 | 2020-08-23 11:10:41 +0200 | [diff] [blame] | 1390 | i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT); |
Christoph Hellwig | c2b4bb8 | 2020-08-23 11:10:42 +0200 | [diff] [blame] | 1391 | spin_unlock(&bdev->bd_size_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1392 | } |
Christoph Hellwig | 611bee5 | 2020-08-23 11:10:41 +0200 | [diff] [blame] | 1393 | EXPORT_SYMBOL(bd_set_nr_sectors); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1394 | |
Al Viro | 4385bab | 2013-05-05 22:11:03 -0400 | [diff] [blame] | 1395 | static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part); |
NeilBrown | 37be412 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1396 | |
Christoph Hellwig | 142fe8f | 2019-11-14 15:34:35 +0100 | [diff] [blame] | 1397 | int bdev_disk_changed(struct block_device *bdev, bool invalidate) |
Christoph Hellwig | a1548b6 | 2019-11-14 15:34:34 +0100 | [diff] [blame] | 1398 | { |
Christoph Hellwig | 142fe8f | 2019-11-14 15:34:35 +0100 | [diff] [blame] | 1399 | struct gendisk *disk = bdev->bd_disk; |
Christoph Hellwig | a1548b6 | 2019-11-14 15:34:34 +0100 | [diff] [blame] | 1400 | int ret; |
| 1401 | |
Christoph Hellwig | f0b870d | 2019-11-14 15:34:36 +0100 | [diff] [blame] | 1402 | lockdep_assert_held(&bdev->bd_mutex); |
| 1403 | |
Gulam Mohamed | b0f5d48 | 2021-05-14 15:18:42 +0200 | [diff] [blame] | 1404 | if (!(disk->flags & GENHD_FL_UP)) |
| 1405 | return -ENXIO; |
| 1406 | |
Christoph Hellwig | a1548b6 | 2019-11-14 15:34:34 +0100 | [diff] [blame] | 1407 | rescan: |
Christoph Hellwig | d46430b | 2020-04-14 09:28:57 +0200 | [diff] [blame] | 1408 | ret = blk_drop_partitions(bdev); |
Christoph Hellwig | a1548b6 | 2019-11-14 15:34:34 +0100 | [diff] [blame] | 1409 | if (ret) |
| 1410 | return ret; |
| 1411 | |
Chris Chiu | 0379508 | 2021-03-23 16:52:19 +0800 | [diff] [blame] | 1412 | clear_bit(GD_NEED_PART_SCAN, &disk->state); |
| 1413 | |
Christoph Hellwig | d981cb5 | 2020-03-18 09:12:06 +0100 | [diff] [blame] | 1414 | /* |
| 1415 | * Historically we only set the capacity to zero for devices that |
| 1416 | * support partitions (independent of actually having partitions created).
| 1417 | * Doing that is rather inconsistent, but changing it broke legacy
| 1418 | * udisks polling for legacy ide-cdrom devices. Use the crude check
| 1419 | * below to get the sane behavior for most devices while not breaking
| 1420 | * userspace for this particular setup. |
| 1421 | */ |
| 1422 | if (invalidate) { |
| 1423 | if (disk_part_scan_enabled(disk) || |
| 1424 | !(disk->flags & GENHD_FL_REMOVABLE)) |
| 1425 | set_capacity(disk, 0); |
| 1426 | } else { |
| 1427 | if (disk->fops->revalidate_disk) |
| 1428 | disk->fops->revalidate_disk(disk); |
| 1429 | } |
Christoph Hellwig | a1548b6 | 2019-11-14 15:34:34 +0100 | [diff] [blame] | 1430 | |
| 1431 | check_disk_size_change(disk, bdev, !invalidate); |
Christoph Hellwig | a1548b6 | 2019-11-14 15:34:34 +0100 | [diff] [blame] | 1432 | |
Christoph Hellwig | 142fe8f | 2019-11-14 15:34:35 +0100 | [diff] [blame] | 1433 | if (get_capacity(disk)) { |
| 1434 | ret = blk_add_partitions(disk, bdev); |
| 1435 | if (ret == -EAGAIN) |
| 1436 | goto rescan; |
Eric Biggers | 490547c | 2019-12-02 10:21:34 -0800 | [diff] [blame] | 1437 | } else if (invalidate) { |
Christoph Hellwig | a1548b6 | 2019-11-14 15:34:34 +0100 | [diff] [blame] | 1438 | /* |
| 1439 | * Tell userspace that the media / partition table may have |
| 1440 | * changed. |
| 1441 | */ |
| 1442 | kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); |
Christoph Hellwig | a1548b6 | 2019-11-14 15:34:34 +0100 | [diff] [blame] | 1443 | } |
| 1444 | |
Christoph Hellwig | a1548b6 | 2019-11-14 15:34:34 +0100 | [diff] [blame] | 1445 | return ret; |
| 1446 | } |
Christoph Hellwig | f0b870d | 2019-11-14 15:34:36 +0100 | [diff] [blame] | 1447 | /* |
| 1448 | * Only exported for the loop and dasd drivers for historic reasons. Don't use in new
| 1449 | * code! |
| 1450 | */ |
| 1451 | EXPORT_SYMBOL_GPL(bdev_disk_changed); |
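/*
 * Call pattern sketch (illustrative, not part of the original file):
 * since bdev_disk_changed() asserts that bd_mutex is held, a caller
 * rescanning the partition table does so under the lock:
 *
 *	mutex_lock(&bdev->bd_mutex);
 *	ret = bdev_disk_changed(bdev, false);
 *	mutex_unlock(&bdev->bd_mutex);
 *
 * Passing true for @invalidate is meant for media that went away and
 * zeroes the capacity for most devices instead of revalidating it.
 */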
Christoph Hellwig | a1548b6 | 2019-11-14 15:34:34 +0100 | [diff] [blame] | 1452 | |
Peter Zijlstra | 6d740cd | 2007-02-20 13:58:18 -0800 | [diff] [blame] | 1453 | /* |
| 1454 | * bd_mutex locking: |
| 1455 | * |
| 1456 | * mutex_lock(part->bd_mutex) |
| 1457 | * mutex_lock_nested(whole->bd_mutex, 1) |
| 1458 | */ |
| 1459 | |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1460 | static int __blkdev_get(struct block_device *bdev, fmode_t mode, void *holder, |
| 1461 | int for_part) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1462 | { |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1463 | struct block_device *whole = NULL, *claiming = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1464 | struct gendisk *disk; |
Pavel Emelyanov | 7db9cfd | 2008-06-05 22:46:27 -0700 | [diff] [blame] | 1465 | int ret; |
Tejun Heo | cf771cb | 2008-09-03 09:01:09 +0200 | [diff] [blame] | 1466 | int partno; |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1467 | bool first_open = false, unblock_events = true, need_restart; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1468 | |
NeilBrown | d337482 | 2009-01-09 08:31:10 +1100 | [diff] [blame] | 1469 | restart: |
Christoph Hellwig | c5638ab | 2020-07-16 16:33:07 +0200 | [diff] [blame] | 1470 | need_restart = false; |
Tejun Heo | 89f9749 | 2008-11-05 10:21:06 +0100 | [diff] [blame] | 1471 | ret = -ENXIO; |
Jan Kara | 560e7cb | 2018-02-26 13:01:42 +0100 | [diff] [blame] | 1472 | disk = bdev_get_gendisk(bdev, &partno); |
Tejun Heo | 0762b8b | 2008-08-25 19:56:12 +0900 | [diff] [blame] | 1473 | if (!disk) |
Arnd Bergmann | 6e9624b | 2010-08-07 18:25:34 +0200 | [diff] [blame] | 1474 | goto out; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1475 | |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1476 | if (partno) { |
| 1477 | whole = bdget_disk(disk, 0); |
| 1478 | if (!whole) { |
| 1479 | ret = -ENOMEM; |
| 1480 | goto out_put_disk; |
| 1481 | } |
| 1482 | } |
| 1483 | |
| 1484 | if (!for_part && (mode & FMODE_EXCL)) { |
| 1485 | WARN_ON_ONCE(!holder); |
| 1486 | if (whole) |
| 1487 | claiming = whole; |
| 1488 | else |
| 1489 | claiming = bdev; |
| 1490 | ret = bd_prepare_to_claim(bdev, claiming, holder); |
| 1491 | if (ret) |
| 1492 | goto out_put_whole; |
| 1493 | } |
| 1494 | |
Tejun Heo | 69e02c5 | 2011-03-09 19:54:27 +0100 | [diff] [blame] | 1495 | disk_block_events(disk); |
NeilBrown | 6796bf5 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1496 | mutex_lock_nested(&bdev->bd_mutex, for_part); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1497 | if (!bdev->bd_openers) { |
Jan Kara | 8973665 | 2018-02-26 13:01:40 +0100 | [diff] [blame] | 1498 | first_open = true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1499 | bdev->bd_disk = disk; |
| 1500 | bdev->bd_contains = bdev; |
Christoph Hellwig | c2ee070 | 2017-08-23 19:10:31 +0200 | [diff] [blame] | 1501 | bdev->bd_partno = partno; |
Dan Williams | 03cdadb | 2016-02-26 15:19:43 -0800 | [diff] [blame] | 1502 | |
Tejun Heo | cf771cb | 2008-09-03 09:01:09 +0200 | [diff] [blame] | 1503 | if (!partno) { |
Tejun Heo | 89f9749 | 2008-11-05 10:21:06 +0100 | [diff] [blame] | 1504 | ret = -ENXIO; |
| 1505 | bdev->bd_part = disk_get_part(disk, partno); |
| 1506 | if (!bdev->bd_part) |
| 1507 | goto out_clear; |
| 1508 | |
Tejun Heo | 1196f8b | 2011-04-21 20:54:45 +0200 | [diff] [blame] | 1509 | ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1510 | if (disk->fops->open) { |
Al Viro | 572c489 | 2007-10-08 13:24:05 -0400 | [diff] [blame] | 1511 | ret = disk->fops->open(bdev, mode); |
Christoph Hellwig | c5638ab | 2020-07-16 16:33:07 +0200 | [diff] [blame] | 1512 | /* |
| 1513 | * If we lost a race with 'disk' being deleted, |
| 1514 | * try again. See md.c |
| 1515 | */ |
| 1516 | if (ret == -ERESTARTSYS) |
| 1517 | need_restart = true; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1518 | } |
Tejun Heo | 7e69723 | 2011-05-23 13:26:07 +0200 | [diff] [blame] | 1519 | |
Jan Kara | 04906b2 | 2019-01-14 09:48:10 +0100 | [diff] [blame] | 1520 | if (!ret) { |
Christoph Hellwig | 611bee5 | 2020-08-23 11:10:41 +0200 | [diff] [blame] | 1521 | bd_set_nr_sectors(bdev, get_capacity(disk)); |
Jan Kara | 04906b2 | 2019-01-14 09:48:10 +0100 | [diff] [blame] | 1522 | set_init_blocksize(bdev); |
| 1523 | } |
Tejun Heo | 7e69723 | 2011-05-23 13:26:07 +0200 | [diff] [blame] | 1524 | |
Tejun Heo | 1196f8b | 2011-04-21 20:54:45 +0200 | [diff] [blame] | 1525 | /* |
| 1526 | * If the device is invalidated, rescan partitions
| 1527 | * if open succeeded or failed with -ENOMEDIUM. |
| 1528 | * The latter is necessary to prevent ghost |
| 1529 | * partitions on a removed medium. |
| 1530 | */ |
Christoph Hellwig | 38430f0 | 2020-09-21 09:19:45 +0200 | [diff] [blame] | 1531 | if (test_bit(GD_NEED_PART_SCAN, &disk->state) && |
Jan Kara | 731dc48 | 2019-10-21 10:37:59 +0200 | [diff] [blame] | 1532 | (!ret || ret == -ENOMEDIUM)) |
| 1533 | bdev_disk_changed(bdev, ret == -ENOMEDIUM); |
Dan Williams | 5a023cd | 2015-11-30 10:20:29 -0800 | [diff] [blame] | 1534 | |
Tejun Heo | 1196f8b | 2011-04-21 20:54:45 +0200 | [diff] [blame] | 1535 | if (ret) |
| 1536 | goto out_clear; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1537 | } else { |
NeilBrown | 37be412 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1538 | BUG_ON(for_part); |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1539 | ret = __blkdev_get(whole, mode, NULL, 1); |
| 1540 | if (ret) |
Tejun Heo | 0762b8b | 2008-08-25 19:56:12 +0900 | [diff] [blame] | 1541 | goto out_clear; |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1542 | bdev->bd_contains = bdgrab(whole); |
Tejun Heo | 89f9749 | 2008-11-05 10:21:06 +0100 | [diff] [blame] | 1543 | bdev->bd_part = disk_get_part(disk, partno); |
Tejun Heo | e71bf0d | 2008-09-03 09:03:02 +0200 | [diff] [blame] | 1544 | if (!(disk->flags & GENHD_FL_UP) || |
Tejun Heo | 89f9749 | 2008-11-05 10:21:06 +0100 | [diff] [blame] | 1545 | !bdev->bd_part || !bdev->bd_part->nr_sects) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1546 | ret = -ENXIO; |
Tejun Heo | 0762b8b | 2008-08-25 19:56:12 +0900 | [diff] [blame] | 1547 | goto out_clear; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1548 | } |
Christoph Hellwig | 611bee5 | 2020-08-23 11:10:41 +0200 | [diff] [blame] | 1549 | bd_set_nr_sectors(bdev, bdev->bd_part->nr_sects); |
Jan Kara | 04906b2 | 2019-01-14 09:48:10 +0100 | [diff] [blame] | 1550 | set_init_blocksize(bdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1551 | } |
Jan Kara | 03e2627 | 2017-03-23 01:36:53 +0100 | [diff] [blame] | 1552 | |
| 1553 | if (bdev->bd_bdi == &noop_backing_dev_info) |
| 1554 | bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1555 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1556 | if (bdev->bd_contains == bdev) { |
Tejun Heo | 1196f8b | 2011-04-21 20:54:45 +0200 | [diff] [blame] | 1557 | ret = 0; |
| 1558 | if (bdev->bd_disk->fops->open) |
Al Viro | 572c489 | 2007-10-08 13:24:05 -0400 | [diff] [blame] | 1559 | ret = bdev->bd_disk->fops->open(bdev, mode); |
Tejun Heo | 1196f8b | 2011-04-21 20:54:45 +0200 | [diff] [blame] | 1560 | /* the same as first opener case, read comment there */ |
Christoph Hellwig | 38430f0 | 2020-09-21 09:19:45 +0200 | [diff] [blame] | 1561 | if (test_bit(GD_NEED_PART_SCAN, &disk->state) && |
Jan Kara | 731dc48 | 2019-10-21 10:37:59 +0200 | [diff] [blame] | 1562 | (!ret || ret == -ENOMEDIUM)) |
| 1563 | bdev_disk_changed(bdev, ret == -ENOMEDIUM); |
Tejun Heo | 1196f8b | 2011-04-21 20:54:45 +0200 | [diff] [blame] | 1564 | if (ret) |
| 1565 | goto out_unlock_bdev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1566 | } |
| 1567 | } |
| 1568 | bdev->bd_openers++; |
NeilBrown | 37be412 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1569 | if (for_part) |
| 1570 | bdev->bd_part_count++; |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1571 | if (claiming) |
| 1572 | bd_finish_claiming(bdev, claiming, holder); |
| 1573 | |
| 1574 | /* |
| 1575 | * Block event polling for write claims if requested. Any write holder |
| 1576 | * makes the write_holder state stick until all are released. This is |
| 1577 | * good enough, and tracking individual writeable references is too
| 1578 | * fragile given the way @mode is used in blkdev_get/put(). |
| 1579 | */ |
| 1580 | if (claiming && (mode & FMODE_WRITE) && !bdev->bd_write_holder && |
| 1581 | (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) { |
| 1582 | bdev->bd_write_holder = true; |
| 1583 | unblock_events = false; |
| 1584 | } |
Arjan van de Ven | c039e31 | 2006-03-23 03:00:28 -0800 | [diff] [blame] | 1585 | mutex_unlock(&bdev->bd_mutex); |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1586 | |
| 1587 | if (unblock_events) |
| 1588 | disk_unblock_events(disk); |
| 1589 | |
Jan Kara | 8973665 | 2018-02-26 13:01:40 +0100 | [diff] [blame] | 1590 | /* only one opener holds refs to the module and disk */ |
| 1591 | if (!first_open) |
| 1592 | put_disk_and_module(disk); |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1593 | if (whole) |
| 1594 | bdput(whole); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1595 | return 0; |
| 1596 | |
Tejun Heo | 0762b8b | 2008-08-25 19:56:12 +0900 | [diff] [blame] | 1597 | out_clear: |
Tejun Heo | 89f9749 | 2008-11-05 10:21:06 +0100 | [diff] [blame] | 1598 | disk_put_part(bdev->bd_part); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1599 | bdev->bd_disk = NULL; |
Tejun Heo | 0762b8b | 2008-08-25 19:56:12 +0900 | [diff] [blame] | 1600 | bdev->bd_part = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1601 | if (bdev != bdev->bd_contains) |
Al Viro | 572c489 | 2007-10-08 13:24:05 -0400 | [diff] [blame] | 1602 | __blkdev_put(bdev->bd_contains, mode, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1603 | bdev->bd_contains = NULL; |
Tejun Heo | 0762b8b | 2008-08-25 19:56:12 +0900 | [diff] [blame] | 1604 | out_unlock_bdev: |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1605 | if (claiming) |
| 1606 | bd_abort_claiming(bdev, claiming, holder); |
Arjan van de Ven | c039e31 | 2006-03-23 03:00:28 -0800 | [diff] [blame] | 1607 | mutex_unlock(&bdev->bd_mutex); |
Tejun Heo | 69e02c5 | 2011-03-09 19:54:27 +0100 | [diff] [blame] | 1608 | disk_unblock_events(disk); |
Christoph Hellwig | 5b642d8b | 2020-07-16 16:33:10 +0200 | [diff] [blame] | 1609 | out_put_whole: |
| 1610 | if (whole) |
| 1611 | bdput(whole); |
| 1612 | out_put_disk: |
Jan Kara | 9df6c29 | 2018-02-26 13:01:39 +0100 | [diff] [blame] | 1613 | put_disk_and_module(disk); |
Christoph Hellwig | c5638ab | 2020-07-16 16:33:07 +0200 | [diff] [blame] | 1614 | if (need_restart) |
| 1615 | goto restart; |
Dan Carpenter | 4345cab | 2011-03-19 13:53:31 +0100 | [diff] [blame] | 1616 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1617 | return ret; |
| 1618 | } |
| 1619 | |
Tejun Heo | d4d7762 | 2010-11-13 11:55:18 +0100 | [diff] [blame] | 1620 | /** |
| 1621 | * blkdev_get - open a block device |
| 1622 | * @bdev: block_device to open |
| 1623 | * @mode: FMODE_* mask |
| 1624 | * @holder: exclusive holder identifier |
| 1625 | * |
| 1626 | * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is |
| 1627 | * opened with exclusive access. Specifying %FMODE_EXCL with %NULL
| 1628 | * @holder is invalid. Exclusive opens may nest for the same @holder. |
| 1629 | * |
| 1630 | * On success, the reference count of @bdev is unchanged. On failure, |
| 1631 | * @bdev is put. |
| 1632 | * |
| 1633 | * CONTEXT: |
| 1634 | * Might sleep. |
| 1635 | * |
| 1636 | * RETURNS: |
| 1637 | * 0 on success, -errno on failure. |
| 1638 | */ |
Christoph Hellwig | 1fb1a2a | 2020-09-21 09:19:58 +0200 | [diff] [blame] | 1639 | static int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1640 | { |
Christoph Hellwig | e5c7fb4 | 2020-08-31 20:02:36 +0200 | [diff] [blame] | 1641 | int ret, perm = 0; |
Tejun Heo | e525fd8 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1642 | |
Christoph Hellwig | e5c7fb4 | 2020-08-31 20:02:36 +0200 | [diff] [blame] | 1643 | if (mode & FMODE_READ) |
| 1644 | perm |= MAY_READ; |
| 1645 | if (mode & FMODE_WRITE) |
| 1646 | perm |= MAY_WRITE; |
| 1647 | ret = devcgroup_inode_permission(bdev->bd_inode, perm); |
| 1648 | if (ret) |
| 1649 | goto bdput; |
| 1650 | |
| 1651 | ret = __blkdev_get(bdev, mode, holder, 0);
| 1652 | if (ret) |
| 1653 | goto bdput; |
| 1654 | return 0; |
| 1655 | |
| 1656 | bdput: |
| 1657 | bdput(bdev); |
| 1658 | return ret; |
NeilBrown | 37be412 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1659 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1660 | |
Tejun Heo | d4d7762 | 2010-11-13 11:55:18 +0100 | [diff] [blame] | 1661 | /** |
| 1662 | * blkdev_get_by_path - open a block device by name |
| 1663 | * @path: path to the block device to open |
| 1664 | * @mode: FMODE_* mask |
| 1665 | * @holder: exclusive holder identifier |
| 1666 | * |
| 1667 | * Open the block device described by the device file at @path. @mode
| 1668 | * and @holder are identical to blkdev_get().
| 1669 | *
| 1670 | * On success, the returned block_device has a reference count of one.
| 1671 | * |
| 1672 | * CONTEXT: |
| 1673 | * Might sleep. |
| 1674 | * |
| 1675 | * RETURNS: |
| 1676 | * Pointer to block_device on success, ERR_PTR(-errno) on failure. |
| 1677 | */ |
| 1678 | struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, |
| 1679 | void *holder) |
| 1680 | { |
| 1681 | struct block_device *bdev; |
| 1682 | int err; |
| 1683 | |
| 1684 | bdev = lookup_bdev(path); |
| 1685 | if (IS_ERR(bdev)) |
| 1686 | return bdev; |
| 1687 | |
| 1688 | err = blkdev_get(bdev, mode, holder); |
| 1689 | if (err) |
| 1690 | return ERR_PTR(err); |
| 1691 | |
Chuck Ebbert | e51900f | 2011-02-16 18:11:53 -0500 | [diff] [blame] | 1692 | if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { |
| 1693 | blkdev_put(bdev, mode); |
| 1694 | return ERR_PTR(-EACCES); |
| 1695 | } |
| 1696 | |
Tejun Heo | d4d7762 | 2010-11-13 11:55:18 +0100 | [diff] [blame] | 1697 | return bdev; |
| 1698 | } |
| 1699 | EXPORT_SYMBOL(blkdev_get_by_path); |
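/*
 * Usage sketch (illustrative, not part of the original file): a
 * filesystem or stacking driver typically opens its backing device this
 * way and matches the open with blkdev_put() using the same mode flags,
 * so an FMODE_EXCL claim is dropped correctly; the path and my_holder
 * cookie are placeholders:
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_path("/dev/sda1",
 *				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 *				  my_holder);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */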
| 1700 | |
| 1701 | /** |
| 1702 | * blkdev_get_by_dev - open a block device by device number |
| 1703 | * @dev: device number of block device to open |
| 1704 | * @mode: FMODE_* mask |
| 1705 | * @holder: exclusive holder identifier |
| 1706 | * |
| 1707 | * Open the block device described by device number @dev. @mode and
| 1708 | * @holder are identical to blkdev_get(). |
| 1709 | * |
| 1710 | * Use it ONLY if you really do not have anything better - i.e. when |
| 1711 | * you are behind a truly sucky interface and all you are given is a |
| 1712 | * device number. _Never_ to be used for internal purposes. If you |
| 1713 | * ever need it - reconsider your API. |
| 1714 | * |
| 1715 | * On success, the returned block_device has a reference count of one.
| 1716 | * |
| 1717 | * CONTEXT: |
| 1718 | * Might sleep. |
| 1719 | * |
| 1720 | * RETURNS: |
| 1721 | * Pointer to block_device on success, ERR_PTR(-errno) on failure. |
| 1722 | */ |
| 1723 | struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) |
| 1724 | { |
| 1725 | struct block_device *bdev; |
| 1726 | int err; |
| 1727 | |
| 1728 | bdev = bdget(dev); |
| 1729 | if (!bdev) |
| 1730 | return ERR_PTR(-ENOMEM); |
| 1731 | |
| 1732 | err = blkdev_get(bdev, mode, holder); |
| 1733 | if (err) |
| 1734 | return ERR_PTR(err); |
| 1735 | |
| 1736 | return bdev; |
| 1737 | } |
| 1738 | EXPORT_SYMBOL(blkdev_get_by_dev); |
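/*
 * Usage sketch (illustrative, not part of the original file): the rare
 * legitimate caller has nothing but a device number, e.g. parsed from a
 * "major:minor" module parameter; major and minor are placeholders, and
 * a non-exclusive open may pass a NULL holder:
 *
 *	bdev = blkdev_get_by_dev(MKDEV(major, minor), FMODE_READ, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ);
 */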
| 1739 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1740 | static int blkdev_open(struct inode * inode, struct file * filp) |
| 1741 | { |
| 1742 | struct block_device *bdev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1743 | |
| 1744 | /* |
| 1745 | * Preserve backwards compatibility and allow large file access |
| 1746 | * even if userspace doesn't ask for it explicitly. Some mkfs |
| 1747 | * binaries need it. We might want to drop this workaround
| 1748 | * during an unstable branch. |
| 1749 | */ |
| 1750 | filp->f_flags |= O_LARGEFILE; |
| 1751 | |
Jens Axboe | a304f07 | 2020-05-22 09:14:08 -0600 | [diff] [blame] | 1752 | filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; |
Christoph Hellwig | c35fc7a | 2017-08-29 16:13:21 +0200 | [diff] [blame] | 1753 | |
Al Viro | 572c489 | 2007-10-08 13:24:05 -0400 | [diff] [blame] | 1754 | if (filp->f_flags & O_NDELAY) |
| 1755 | filp->f_mode |= FMODE_NDELAY; |
| 1756 | if (filp->f_flags & O_EXCL) |
| 1757 | filp->f_mode |= FMODE_EXCL; |
| 1758 | if ((filp->f_flags & O_ACCMODE) == 3) |
| 1759 | filp->f_mode |= FMODE_WRITE_IOCTL; |
| 1760 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1761 | bdev = bd_acquire(inode); |
Pavel Emelianov | 6a2aae0 | 2006-10-28 10:38:33 -0700 | [diff] [blame] | 1762 | if (bdev == NULL) |
| 1763 | return -ENOMEM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1764 | |
Al Viro | 572c489 | 2007-10-08 13:24:05 -0400 | [diff] [blame] | 1765 | filp->f_mapping = bdev->bd_inode->i_mapping; |
Jeff Layton | 5660e13 | 2017-07-06 07:02:25 -0400 | [diff] [blame] | 1766 | filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); |
Al Viro | 572c489 | 2007-10-08 13:24:05 -0400 | [diff] [blame] | 1767 | |
Tejun Heo | e525fd8 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1768 | return blkdev_get(bdev, filp->f_mode, filp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1769 | } |
| 1770 | |
Al Viro | 4385bab | 2013-05-05 22:11:03 -0400 | [diff] [blame] | 1771 | static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1772 | { |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1773 | struct gendisk *disk = bdev->bd_disk; |
NeilBrown | 37be412 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1774 | struct block_device *victim = NULL; |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1775 | |
Douglas Anderson | b849dd8 | 2020-03-24 14:48:27 -0700 | [diff] [blame] | 1776 | /* |
| 1777 | * Sync early if it looks like we're the last one. If someone else |
| 1778 | * opens the block device between now and the decrement of bd_openers |
| 1779 | * then we did a sync that we didn't need to, but that's not the end |
| 1780 | * of the world, and we want to avoid long (potentially several-minute)
| 1781 | * syncs while holding the mutex. |
| 1782 | */ |
| 1783 | if (bdev->bd_openers == 1) |
| 1784 | sync_blockdev(bdev); |
| 1785 | |
NeilBrown | 6796bf5 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1786 | mutex_lock_nested(&bdev->bd_mutex, for_part); |
NeilBrown | 37be412 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1787 | if (for_part) |
| 1788 | bdev->bd_part_count--; |
| 1789 | |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1790 | if (!--bdev->bd_openers) { |
Tejun Heo | 6a027ef | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1791 | WARN_ON_ONCE(bdev->bd_holders); |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1792 | sync_blockdev(bdev); |
| 1793 | kill_bdev(bdev); |
Ilya Dryomov | 43d1c0e | 2015-11-20 22:22:34 +0100 | [diff] [blame] | 1794 | |
Vivek Goyal | dbd3ca5 | 2015-11-09 09:23:40 -0700 | [diff] [blame] | 1795 | bdev_write_inode(bdev); |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1796 | } |
| 1797 | if (bdev->bd_contains == bdev) { |
| 1798 | if (disk->fops->release) |
Al Viro | db2a144 | 2013-05-05 21:52:57 -0400 | [diff] [blame] | 1799 | disk->fops->release(disk, mode); |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1800 | } |
| 1801 | if (!bdev->bd_openers) { |
Tejun Heo | 0762b8b | 2008-08-25 19:56:12 +0900 | [diff] [blame] | 1802 | disk_put_part(bdev->bd_part); |
| 1803 | bdev->bd_part = NULL; |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1804 | bdev->bd_disk = NULL; |
NeilBrown | 37be412 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1805 | if (bdev != bdev->bd_contains) |
| 1806 | victim = bdev->bd_contains; |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1807 | bdev->bd_contains = NULL; |
Tejun Heo | 523e1d3 | 2011-10-19 14:31:07 +0200 | [diff] [blame] | 1808 | |
Jan Kara | 9df6c29 | 2018-02-26 13:01:39 +0100 | [diff] [blame] | 1809 | put_disk_and_module(disk); |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1810 | } |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1811 | mutex_unlock(&bdev->bd_mutex); |
| 1812 | bdput(bdev); |
NeilBrown | 37be412 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1813 | if (victim) |
Al Viro | 9a1c354 | 2008-02-22 20:40:24 -0500 | [diff] [blame] | 1814 | __blkdev_put(victim, mode, 1); |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1815 | } |
| 1816 | |
Al Viro | 4385bab | 2013-05-05 22:11:03 -0400 | [diff] [blame] | 1817 | void blkdev_put(struct block_device *bdev, fmode_t mode) |
NeilBrown | 37be412 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1818 | { |
Tejun Heo | 85ef06d | 2011-07-01 16:17:47 +0200 | [diff] [blame] | 1819 | mutex_lock(&bdev->bd_mutex); |
| 1820 | |
Tejun Heo | e525fd8 | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1821 | if (mode & FMODE_EXCL) { |
Tejun Heo | 6a027ef | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1822 | bool bdev_free; |
| 1823 | |
| 1824 | /* |
| 1825 | * Release a claim on the device. The holder fields |
| 1826 | * are protected with bdev_lock. bd_mutex is to |
| 1827 | * synchronize disk_holder unlinking. |
| 1828 | */ |
Tejun Heo | 6a027ef | 2010-11-13 11:55:17 +0100 | [diff] [blame] | 1829 | spin_lock(&bdev_lock); |
| 1830 | |
| 1831 | WARN_ON_ONCE(--bdev->bd_holders < 0); |
| 1832 | WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0); |
| 1833 | |
| 1834 | /* bd_contains might point to self, check in a separate step */ |
| 1835 | if ((bdev_free = !bdev->bd_holders)) |
| 1836 | bdev->bd_holder = NULL; |
| 1837 | if (!bdev->bd_contains->bd_holders) |
| 1838 | bdev->bd_contains->bd_holder = NULL; |
| 1839 | |
| 1840 | spin_unlock(&bdev_lock); |
| 1841 | |
Tejun Heo | 77ea887 | 2010-12-08 20:57:37 +0100 | [diff] [blame] | 1842 | /* |
| 1843 | * If this was the last claim, remove holder link and |
| 1844 | * unblock event polling if it was a write holder.
| 1845 | */ |
Tejun Heo | 85ef06d | 2011-07-01 16:17:47 +0200 | [diff] [blame] | 1846 | if (bdev_free && bdev->bd_write_holder) { |
| 1847 | disk_unblock_events(bdev->bd_disk); |
| 1848 | bdev->bd_write_holder = false; |
Tejun Heo | 77ea887 | 2010-12-08 20:57:37 +0100 | [diff] [blame] | 1849 | } |
Tejun Heo | 6936217 | 2011-03-09 19:54:27 +0100 | [diff] [blame] | 1850 | } |
Tejun Heo | 77ea887 | 2010-12-08 20:57:37 +0100 | [diff] [blame] | 1851 | |
Tejun Heo | 85ef06d | 2011-07-01 16:17:47 +0200 | [diff] [blame] | 1852 | /* |
| 1853 | * Trigger event checking and tell drivers to flush MEDIA_CHANGE |
| 1854 | * event. This is to ensure detection of media removal commanded |
| 1855 | * from userland - e.g. eject(1). |
| 1856 | */ |
| 1857 | disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE); |
| 1858 | |
| 1859 | mutex_unlock(&bdev->bd_mutex); |
| 1860 | |
Al Viro | 4385bab | 2013-05-05 22:11:03 -0400 | [diff] [blame] | 1861 | __blkdev_put(bdev, mode, 0); |
NeilBrown | 37be412 | 2006-12-08 02:36:16 -0800 | [diff] [blame] | 1862 | } |
Peter Zijlstra | 2e7b651 | 2006-12-08 02:36:13 -0800 | [diff] [blame] | 1863 | EXPORT_SYMBOL(blkdev_put); |
| 1864 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1865 | static int blkdev_close(struct inode * inode, struct file * filp) |
| 1866 | { |
Dan Williams | 4ebb16c | 2015-10-28 07:48:19 +0900 | [diff] [blame] | 1867 | struct block_device *bdev = I_BDEV(bdev_file_inode(filp)); |
Al Viro | 4385bab | 2013-05-05 22:11:03 -0400 | [diff] [blame] | 1868 | blkdev_put(bdev, filp->f_mode); |
| 1869 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1870 | } |
| 1871 | |
Arnd Bergmann | bb93e3a | 2005-06-23 00:10:15 -0700 | [diff] [blame] | 1872 | static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1873 | { |
Dan Williams | 4ebb16c | 2015-10-28 07:48:19 +0900 | [diff] [blame] | 1874 | struct block_device *bdev = I_BDEV(bdev_file_inode(file)); |
Al Viro | 56b26ad | 2008-09-19 03:17:36 -0400 | [diff] [blame] | 1875 | fmode_t mode = file->f_mode; |
Christoph Hellwig | fd4ce1a | 2008-11-05 14:58:42 +0100 | [diff] [blame] | 1876 | |
| 1877 | /* |
| 1878 | * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have |
| 1879 | * to update it before every ioctl. |
| 1880 | */ |
Al Viro | 56b26ad | 2008-09-19 03:17:36 -0400 | [diff] [blame] | 1881 | if (file->f_flags & O_NDELAY) |
Christoph Hellwig | fd4ce1a | 2008-11-05 14:58:42 +0100 | [diff] [blame] | 1882 | mode |= FMODE_NDELAY; |
| 1883 | else |
| 1884 | mode &= ~FMODE_NDELAY; |
| 1885 | |
Al Viro | 56b26ad | 2008-09-19 03:17:36 -0400 | [diff] [blame] | 1886 | return blkdev_ioctl(bdev, mode, cmd, arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1887 | } |
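| | |
| | /* |
| | * Userspace counterpart of the O_NDELAY refresh above (illustrative |
| | * sketch; on Linux O_NDELAY is the same flag as O_NONBLOCK): |
| | * |
| | * int fd = open("/dev/sr0", O_RDONLY | O_NONBLOCK); |
| | * ioctl(fd, ...); nonblocking |
| | * fcntl(fd, F_SETFL, 0); clear O_NONBLOCK |
| | * ioctl(fd, ...); same fd, now blocking |
| | * |
| | * Because the flag may change between calls, the mode is recomputed on |
| | * every ioctl instead of being captured once at open time. |
| | */ |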
| 1888 | |
Theodore Ts'o | 87d8fe1 | 2009-01-03 09:47:09 -0500 | [diff] [blame] | 1889 | /* |
Christoph Hellwig | eef9938 | 2009-08-20 17:43:41 +0200 | [diff] [blame] | 1890 | * Write data to the block device. Only intended for the block device itself |
| 1891 | * and the raw driver, which is basically a fake block device. |
| 1892 | * |
| 1893 | * Does not take i_mutex for the write and thus is not for general purpose |
| 1894 | * use. |
| 1895 | */ |
Al Viro | 1456c0a | 2014-04-03 03:21:50 -0400 | [diff] [blame] | 1896 | ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) |
Christoph Hellwig | eef9938 | 2009-08-20 17:43:41 +0200 | [diff] [blame] | 1897 | { |
| 1898 | struct file *file = iocb->ki_filp; |
Dan Williams | 4ebb16c | 2015-10-28 07:48:19 +0900 | [diff] [blame] | 1899 | struct inode *bd_inode = bdev_file_inode(file); |
Al Viro | 7ec7b94 | 2015-04-07 11:35:14 -0400 | [diff] [blame] | 1900 | loff_t size = i_size_read(bd_inode); |
Jianpeng Ma | 53362a0 | 2012-08-02 09:50:39 +0200 | [diff] [blame] | 1901 | struct blk_plug plug; |
yangerkun | 7da9368 | 2021-04-01 15:18:07 +0800 | [diff] [blame] | 1902 | size_t shorted = 0; |
Christoph Hellwig | eef9938 | 2009-08-20 17:43:41 +0200 | [diff] [blame] | 1903 | ssize_t ret; |
Al Viro | 5f380c7 | 2015-04-07 11:28:12 -0400 | [diff] [blame] | 1904 | |
Al Viro | 7ec7b94 | 2015-04-07 11:35:14 -0400 | [diff] [blame] | 1905 | if (bdev_read_only(I_BDEV(bd_inode))) |
| 1906 | return -EPERM; |
Al Viro | 5f380c7 | 2015-04-07 11:28:12 -0400 | [diff] [blame] | 1907 | |
Christoph Hellwig | bb3247a39 | 2020-09-21 09:19:55 +0200 | [diff] [blame] | 1908 | if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev)) |
Darrick J. Wong | dc617f2 | 2019-08-20 07:55:16 -0700 | [diff] [blame] | 1909 | return -ETXTBSY; |
| 1910 | |
Al Viro | 7ec7b94 | 2015-04-07 11:35:14 -0400 | [diff] [blame] | 1911 | if (!iov_iter_count(from)) |
Al Viro | 5f380c7 | 2015-04-07 11:28:12 -0400 | [diff] [blame] | 1912 | return 0; |
| 1913 | |
Al Viro | 7ec7b94 | 2015-04-07 11:35:14 -0400 | [diff] [blame] | 1914 | if (iocb->ki_pos >= size) |
| 1915 | return -ENOSPC; |
| 1916 | |
Christoph Hellwig | c35fc7a | 2017-08-29 16:13:21 +0200 | [diff] [blame] | 1917 | if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT) |
| 1918 | return -EOPNOTSUPP; |
| 1919 | |
yangerkun | 7da9368 | 2021-04-01 15:18:07 +0800 | [diff] [blame] | 1920 | size -= iocb->ki_pos; |
| 1921 | if (iov_iter_count(from) > size) { |
| 1922 | shorted = iov_iter_count(from) - size; |
| 1923 | iov_iter_truncate(from, size); |
| 1924 | } |
Christoph Hellwig | eef9938 | 2009-08-20 17:43:41 +0200 | [diff] [blame] | 1925 | |
Jianpeng Ma | 53362a0 | 2012-08-02 09:50:39 +0200 | [diff] [blame] | 1926 | blk_start_plug(&plug); |
Al Viro | 1456c0a | 2014-04-03 03:21:50 -0400 | [diff] [blame] | 1927 | ret = __generic_file_write_iter(iocb, from); |
Christoph Hellwig | e259221 | 2016-04-07 08:52:01 -0700 | [diff] [blame] | 1928 | if (ret > 0) |
| 1929 | ret = generic_write_sync(iocb, ret); |
yangerkun | 7da9368 | 2021-04-01 15:18:07 +0800 | [diff] [blame] | 1930 | iov_iter_reexpand(from, iov_iter_count(from) + shorted); |
Jianpeng Ma | 53362a0 | 2012-08-02 09:50:39 +0200 | [diff] [blame] | 1931 | blk_finish_plug(&plug); |
Christoph Hellwig | eef9938 | 2009-08-20 17:43:41 +0200 | [diff] [blame] | 1932 | return ret; |
| 1933 | } |
Al Viro | 1456c0a | 2014-04-03 03:21:50 -0400 | [diff] [blame] | 1934 | EXPORT_SYMBOL_GPL(blkdev_write_iter); |
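| | |
| | /* |
| | * Resulting semantics at the end of the device (illustrative, assuming |
| | * a 1 MiB device open at fd): |
| | * |
| | * pwrite(fd, buf, 4096, 1048576) -> -1, errno == ENOSPC |
| | * pwrite(fd, buf, 8192, 1044480) -> 4096 (shortened to fit) |
| | * |
| | * The iov_iter is truncated before the write and re-expanded afterwards |
| | * so the caller sees a consistent iterator state. |
| | */ |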
Christoph Hellwig | eef9938 | 2009-08-20 17:43:41 +0200 | [diff] [blame] | 1935 | |
David Jeffery | b2de525 | 2014-09-29 10:21:10 -0400 | [diff] [blame] | 1936 | ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) |
Linus Torvalds | 684c9aa | 2012-12-07 16:48:39 -0800 | [diff] [blame] | 1937 | { |
| 1938 | struct file *file = iocb->ki_filp; |
Dan Williams | 4ebb16c | 2015-10-28 07:48:19 +0900 | [diff] [blame] | 1939 | struct inode *bd_inode = bdev_file_inode(file); |
Linus Torvalds | 684c9aa | 2012-12-07 16:48:39 -0800 | [diff] [blame] | 1940 | loff_t size = i_size_read(bd_inode); |
Al Viro | a886038 | 2014-04-02 20:02:21 -0400 | [diff] [blame] | 1941 | loff_t pos = iocb->ki_pos; |
yangerkun | 7da9368 | 2021-04-01 15:18:07 +0800 | [diff] [blame] | 1942 | size_t shorted = 0; |
| 1943 | ssize_t ret; |
Linus Torvalds | 684c9aa | 2012-12-07 16:48:39 -0800 | [diff] [blame] | 1944 | |
| 1945 | if (pos >= size) |
| 1946 | return 0; |
| 1947 | |
| 1948 | size -= pos; |
yangerkun | 7da9368 | 2021-04-01 15:18:07 +0800 | [diff] [blame] | 1949 | if (iov_iter_count(to) > size) { |
| 1950 | shorted = iov_iter_count(to) - size; |
| 1951 | iov_iter_truncate(to, size); |
| 1952 | } |
| 1953 | |
| 1954 | ret = generic_file_read_iter(iocb, to); |
| 1955 | iov_iter_reexpand(to, iov_iter_count(to) + shorted); |
| 1956 | return ret; |
Linus Torvalds | 684c9aa | 2012-12-07 16:48:39 -0800 | [diff] [blame] | 1957 | } |
David Jeffery | b2de525 | 2014-09-29 10:21:10 -0400 | [diff] [blame] | 1958 | EXPORT_SYMBOL_GPL(blkdev_read_iter); |
Linus Torvalds | 684c9aa | 2012-12-07 16:48:39 -0800 | [diff] [blame] | 1959 | |
Christoph Hellwig | eef9938 | 2009-08-20 17:43:41 +0200 | [diff] [blame] | 1960 | /* |
Theodore Ts'o | 87d8fe1 | 2009-01-03 09:47:09 -0500 | [diff] [blame] | 1961 | * Try to release a page associated with block device when the system |
| 1962 | * is under memory pressure. |
| 1963 | */ |
| 1964 | static int blkdev_releasepage(struct page *page, gfp_t wait) |
| 1965 | { |
| 1966 | struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super; |
| 1967 | |
| 1968 | if (super && super->s_op->bdev_try_to_free_page) |
| 1969 | return super->s_op->bdev_try_to_free_page(super, page, wait); |
| 1970 | |
| 1971 | return try_to_free_buffers(page); |
| 1972 | } |
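| | |
| | /* |
| | * A filesystem opts in through its super_operations; a minimal sketch |
| | * (the myfs_* names are hypothetical, modelled on the journal-aware |
| | * hook a journalling filesystem such as ext4 provides): |
| | * |
| | * static int myfs_bdev_try_to_free_page(struct super_block *sb, |
| | * struct page *page, gfp_t wait) |
| | * { |
| | * ... drop journal references held on the page's buffers ... |
| | * return try_to_free_buffers(page); |
| | * } |
| | * |
| | * static const struct super_operations myfs_sops = { |
| | * .bdev_try_to_free_page = myfs_bdev_try_to_free_page, |
| | * }; |
| | */ |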
| 1973 | |
Ross Zwisler | 7f6d5b5 | 2016-02-26 15:19:55 -0800 | [diff] [blame] | 1974 | static int blkdev_writepages(struct address_space *mapping, |
| 1975 | struct writeback_control *wbc) |
| 1976 | { |
Ross Zwisler | 7f6d5b5 | 2016-02-26 15:19:55 -0800 | [diff] [blame] | 1977 | return generic_writepages(mapping, wbc); |
| 1978 | } |
| 1979 | |
Adrian Bunk | 4c54ac6 | 2008-02-18 13:48:31 +0100 | [diff] [blame] | 1980 | static const struct address_space_operations def_blk_aops = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1981 | .readpage = blkdev_readpage, |
Matthew Wilcox (Oracle) | d438834 | 2020-06-01 21:47:02 -0700 | [diff] [blame] | 1982 | .readahead = blkdev_readahead, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1983 | .writepage = blkdev_writepage, |
Nick Piggin | 6272b5a | 2007-10-16 01:25:04 -0700 | [diff] [blame] | 1984 | .write_begin = blkdev_write_begin, |
| 1985 | .write_end = blkdev_write_end, |
Ross Zwisler | 7f6d5b5 | 2016-02-26 15:19:55 -0800 | [diff] [blame] | 1986 | .writepages = blkdev_writepages, |
Theodore Ts'o | 87d8fe1 | 2009-01-03 09:47:09 -0500 | [diff] [blame] | 1987 | .releasepage = blkdev_releasepage, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1988 | .direct_IO = blkdev_direct_IO, |
Jan Kara | 88dbcbb | 2018-12-28 00:39:16 -0800 | [diff] [blame] | 1989 | .migratepage = buffer_migrate_page_norefs, |
Mel Gorman | b459722 | 2013-07-03 15:02:05 -0700 | [diff] [blame] | 1990 | .is_dirty_writeback = buffer_check_dirty_writeback, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1991 | }; |
| 1992 | |
Darrick J. Wong | 25f4c41 | 2016-10-11 13:51:11 -0700 | [diff] [blame] | 1993 | #define BLKDEV_FALLOC_FL_SUPPORTED \ |
| 1994 | (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \ |
| 1995 | FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE) |
| 1996 | |
| 1997 | static long blkdev_fallocate(struct file *file, int mode, loff_t start, |
| 1998 | loff_t len) |
| 1999 | { |
| 2000 | struct block_device *bdev = I_BDEV(bdev_file_inode(file)); |
Darrick J. Wong | 25f4c41 | 2016-10-11 13:51:11 -0700 | [diff] [blame] | 2001 | loff_t end = start + len - 1; |
| 2002 | loff_t isize; |
| 2003 | int error; |
| 2004 | |
| 2005 | /* Fail if we don't recognize the flags. */ |
| 2006 | if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED) |
| 2007 | return -EOPNOTSUPP; |
| 2008 | |
| 2009 | /* Don't go off the end of the device. */ |
| 2010 | isize = i_size_read(bdev->bd_inode); |
| 2011 | if (start >= isize) |
| 2012 | return -EINVAL; |
| 2013 | if (end >= isize) { |
| 2014 | if (mode & FALLOC_FL_KEEP_SIZE) { |
| 2015 | len = isize - start; |
| 2016 | end = start + len - 1; |
| 2017 | } else |
| 2018 | return -EINVAL; |
| 2019 | } |
| 2020 | |
| 2021 | /* |
| 2022 | * Don't allow IO that isn't aligned to logical block size. |
| 2023 | */ |
| 2024 | if ((start | len) & (bdev_logical_block_size(bdev) - 1)) |
| 2025 | return -EINVAL; |
| 2026 | |
| 2027 | /* Invalidate the page cache, including dirty pages. */ |
Jan Kara | 384d87e | 2020-09-04 10:58:52 +0200 | [diff] [blame] | 2028 | error = truncate_bdev_range(bdev, file->f_mode, start, end); |
| 2029 | if (error) |
| 2030 | return error; |
Darrick J. Wong | 25f4c41 | 2016-10-11 13:51:11 -0700 | [diff] [blame] | 2031 | |
| 2032 | switch (mode) { |
| 2033 | case FALLOC_FL_ZERO_RANGE: |
| 2034 | case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE: |
| 2035 | error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, |
Christoph Hellwig | ee472d8 | 2017-04-05 19:21:08 +0200 | [diff] [blame] | 2036 | GFP_KERNEL, BLKDEV_ZERO_NOUNMAP); |
Darrick J. Wong | 25f4c41 | 2016-10-11 13:51:11 -0700 | [diff] [blame] | 2037 | break; |
| 2038 | case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE: |
Christoph Hellwig | 3404512 | 2017-04-05 19:21:11 +0200 | [diff] [blame] | 2039 | error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, |
| 2040 | GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK); |
Darrick J. Wong | 25f4c41 | 2016-10-11 13:51:11 -0700 | [diff] [blame] | 2041 | break; |
| 2042 | case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE: |
Darrick J. Wong | 25f4c41 | 2016-10-11 13:51:11 -0700 | [diff] [blame] | 2043 | error = blkdev_issue_discard(bdev, start >> 9, len >> 9, |
| 2044 | GFP_KERNEL, 0); |
| 2045 | break; |
| 2046 | default: |
| 2047 | return -EOPNOTSUPP; |
| 2048 | } |
| 2049 | if (error) |
| 2050 | return error; |
| 2051 | |
| 2052 | /* |
| 2053 | * Invalidate again; if someone wandered in and dirtied a page, |
| 2054 | * the caller will be given -EBUSY. The third argument is |
| 2055 | * inclusive, so the rounding here is safe. |
| 2056 | */ |
Jan Kara | 384d87e | 2020-09-04 10:58:52 +0200 | [diff] [blame] | 2057 | return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping, |
Darrick J. Wong | 25f4c41 | 2016-10-11 13:51:11 -0700 | [diff] [blame] | 2058 | start >> PAGE_SHIFT, |
| 2059 | end >> PAGE_SHIFT); |
| 2060 | } |
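| | |
| | /* |
| | * From userspace the cases above map onto fallocate(2) mode bits |
| | * (illustrative sketch; off and len must be logical-block aligned and |
| | * "/dev/vdb" is a hypothetical device): |
| | * |
| | * int fd = open("/dev/vdb", O_RDWR); |
| | * |
| | * zero a range: |
| | * fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE, off, len); |
| | * punch a hole, reads back as zeroes on success: |
| | * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len); |
| | * discard, stale data may remain readable: |
| | * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | |
| | * FALLOC_FL_NO_HIDE_STALE, off, len); |
| | */ |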
| 2061 | |
Arjan van de Ven | 4b6f5d2 | 2006-03-28 01:56:42 -0800 | [diff] [blame] | 2062 | const struct file_operations def_blk_fops = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2063 | .open = blkdev_open, |
| 2064 | .release = blkdev_close, |
| 2065 | .llseek = block_llseek, |
Al Viro | a886038 | 2014-04-02 20:02:21 -0400 | [diff] [blame] | 2066 | .read_iter = blkdev_read_iter, |
Al Viro | 1456c0a | 2014-04-03 03:21:50 -0400 | [diff] [blame] | 2067 | .write_iter = blkdev_write_iter, |
Christoph Hellwig | eae83ce | 2018-11-30 08:31:52 -0700 | [diff] [blame] | 2068 | .iopoll = blkdev_iopoll, |
Dan Williams | acc93d3 | 2016-05-07 11:40:28 -0700 | [diff] [blame] | 2069 | .mmap = generic_file_mmap, |
Andrew Morton | b1dd3b2 | 2010-04-06 14:35:00 -0700 | [diff] [blame] | 2070 | .fsync = blkdev_fsync, |
Arnd Bergmann | bb93e3a | 2005-06-23 00:10:15 -0700 | [diff] [blame] | 2071 | .unlocked_ioctl = block_ioctl, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2072 | #ifdef CONFIG_COMPAT |
| 2073 | .compat_ioctl = compat_blkdev_ioctl, |
| 2074 | #endif |
Linus Torvalds | 1e8b333 | 2012-11-29 10:49:50 -0800 | [diff] [blame] | 2075 | .splice_read = generic_file_splice_read, |
Al Viro | 8d02076 | 2014-04-05 04:27:08 -0400 | [diff] [blame] | 2076 | .splice_write = iter_file_splice_write, |
Darrick J. Wong | 25f4c41 | 2016-10-11 13:51:11 -0700 | [diff] [blame] | 2077 | .fallocate = blkdev_fallocate, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2078 | }; |
| 2079 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2080 | /** |
| 2081 | * lookup_bdev - lookup a struct block_device by name |
Randy Dunlap | 94e2959 | 2009-01-06 14:41:15 -0800 | [diff] [blame] | 2082 | * @pathname: special file representing the block device |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2083 | * |
Randy Dunlap | 57d1b53 | 2008-10-09 10:42:38 +0200 | [diff] [blame] | 2084 | * Get a reference to the block device at @pathname in the current |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2085 | * namespace if possible and return it. Return ERR_PTR(error) |
| 2086 | * otherwise. |
| 2087 | */ |
Al Viro | 421748e | 2008-08-02 01:04:36 -0400 | [diff] [blame] | 2088 | struct block_device *lookup_bdev(const char *pathname) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2089 | { |
| 2090 | struct block_device *bdev; |
| 2091 | struct inode *inode; |
Al Viro | 421748e | 2008-08-02 01:04:36 -0400 | [diff] [blame] | 2092 | struct path path; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2093 | int error; |
| 2094 | |
Al Viro | 421748e | 2008-08-02 01:04:36 -0400 | [diff] [blame] | 2095 | if (!pathname || !*pathname) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2096 | return ERR_PTR(-EINVAL); |
| 2097 | |
Al Viro | 421748e | 2008-08-02 01:04:36 -0400 | [diff] [blame] | 2098 | error = kern_path(pathname, LOOKUP_FOLLOW, &path); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2099 | if (error) |
| 2100 | return ERR_PTR(error); |
| 2101 | |
David Howells | bb668734 | 2015-03-17 22:26:21 +0000 | [diff] [blame] | 2102 | inode = d_backing_inode(path.dentry); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2103 | error = -ENOTBLK; |
| 2104 | if (!S_ISBLK(inode->i_mode)) |
| 2105 | goto fail; |
| 2106 | error = -EACCES; |
Eric W. Biederman | a2982cc | 2016-06-09 15:34:02 -0500 | [diff] [blame] | 2107 | if (!may_open_dev(&path)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2108 | goto fail; |
| 2109 | error = -ENOMEM; |
| 2110 | bdev = bd_acquire(inode); |
| 2111 | if (!bdev) |
| 2112 | goto fail; |
| 2113 | out: |
Al Viro | 421748e | 2008-08-02 01:04:36 -0400 | [diff] [blame] | 2114 | path_put(&path); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2115 | return bdev; |
| 2116 | fail: |
| 2117 | bdev = ERR_PTR(error); |
| 2118 | goto out; |
| 2119 | } |
Al Viro | d5686b4 | 2008-08-01 05:00:11 -0400 | [diff] [blame] | 2120 | EXPORT_SYMBOL(lookup_bdev); |
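| | |
| | /* |
| | * Typical in-kernel use (illustrative; the path is hypothetical). The |
| | * reference taken via bd_acquire() is dropped with bdput(): |
| | * |
| | * struct block_device *bdev = lookup_bdev("/dev/mapper/data"); |
| | * |
| | * if (IS_ERR(bdev)) |
| | * return PTR_ERR(bdev); |
| | * ... inspect bdev ... |
| | * bdput(bdev); |
| | */ |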
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2121 | |
NeilBrown | 93b270f | 2011-02-24 17:25:47 +1100 | [diff] [blame] | 2122 | int __invalidate_device(struct block_device *bdev, bool kill_dirty) |
David Howells | b71e8a4 | 2006-08-29 19:06:11 +0100 | [diff] [blame] | 2123 | { |
| 2124 | struct super_block *sb = get_super(bdev); |
| 2125 | int res = 0; |
| 2126 | |
| 2127 | if (sb) { |
| 2128 | /* |
| 2129 | * no need to lock the super, get_super holds the |
| 2130 | * read mutex so the filesystem cannot go away |
| 2131 | * under us (->put_super runs with the write lock |
| 2132 | * held). |
| 2133 | */ |
| 2134 | shrink_dcache_sb(sb); |
NeilBrown | 93b270f | 2011-02-24 17:25:47 +1100 | [diff] [blame] | 2135 | res = invalidate_inodes(sb, kill_dirty); |
David Howells | b71e8a4 | 2006-08-29 19:06:11 +0100 | [diff] [blame] | 2136 | drop_super(sb); |
| 2137 | } |
Peter Zijlstra | f98393a | 2007-05-06 14:49:54 -0700 | [diff] [blame] | 2138 | invalidate_bdev(bdev); |
David Howells | b71e8a4 | 2006-08-29 19:06:11 +0100 | [diff] [blame] | 2139 | return res; |
| 2140 | } |
| 2141 | EXPORT_SYMBOL(__invalidate_device); |
Jan Kara | 5c0d6b6 | 2012-07-03 16:45:31 +0200 | [diff] [blame] | 2142 | |
| 2143 | void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) |
| 2144 | { |
| 2145 | struct inode *inode, *old_inode = NULL; |
| 2146 | |
Dave Chinner | 74278da | 2015-03-04 12:37:22 -0500 | [diff] [blame] | 2147 | spin_lock(&blockdev_superblock->s_inode_list_lock); |
Jan Kara | 5c0d6b6 | 2012-07-03 16:45:31 +0200 | [diff] [blame] | 2148 | list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { |
| 2149 | struct address_space *mapping = inode->i_mapping; |
Rabin Vincent | af30922 | 2016-12-01 09:18:28 +0100 | [diff] [blame] | 2150 | struct block_device *bdev; |
Jan Kara | 5c0d6b6 | 2012-07-03 16:45:31 +0200 | [diff] [blame] | 2151 | |
| 2152 | spin_lock(&inode->i_lock); |
| 2153 | if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || |
| 2154 | mapping->nrpages == 0) { |
| 2155 | spin_unlock(&inode->i_lock); |
| 2156 | continue; |
| 2157 | } |
| 2158 | __iget(inode); |
| 2159 | spin_unlock(&inode->i_lock); |
Dave Chinner | 74278da | 2015-03-04 12:37:22 -0500 | [diff] [blame] | 2160 | spin_unlock(&blockdev_superblock->s_inode_list_lock); |
Jan Kara | 5c0d6b6 | 2012-07-03 16:45:31 +0200 | [diff] [blame] | 2161 | /* |
| 2162 | * We hold a reference to 'inode' so it couldn't have been |
| 2163 | * removed from s_inodes list while we dropped the |
Dave Chinner | 74278da | 2015-03-04 12:37:22 -0500 | [diff] [blame] | 2164 | * s_inode_list_lock. We cannot iput the inode now as we can |
Jan Kara | 5c0d6b6 | 2012-07-03 16:45:31 +0200 | [diff] [blame] | 2165 | * be holding the last reference and we cannot iput it under |
Dave Chinner | 74278da | 2015-03-04 12:37:22 -0500 | [diff] [blame] | 2166 | * s_inode_list_lock. So we keep the reference and iput it |
Jan Kara | 5c0d6b6 | 2012-07-03 16:45:31 +0200 | [diff] [blame] | 2167 | * later. |
| 2168 | */ |
| 2169 | iput(old_inode); |
| 2170 | old_inode = inode; |
Rabin Vincent | af30922 | 2016-12-01 09:18:28 +0100 | [diff] [blame] | 2171 | bdev = I_BDEV(inode); |
Jan Kara | 5c0d6b6 | 2012-07-03 16:45:31 +0200 | [diff] [blame] | 2172 | |
Rabin Vincent | af30922 | 2016-12-01 09:18:28 +0100 | [diff] [blame] | 2173 | mutex_lock(&bdev->bd_mutex); |
| 2174 | if (bdev->bd_openers) |
| 2175 | func(bdev, arg); |
| 2176 | mutex_unlock(&bdev->bd_mutex); |
Jan Kara | 5c0d6b6 | 2012-07-03 16:45:31 +0200 | [diff] [blame] | 2177 | |
Dave Chinner | 74278da | 2015-03-04 12:37:22 -0500 | [diff] [blame] | 2178 | spin_lock(&blockdev_superblock->s_inode_list_lock); |
Jan Kara | 5c0d6b6 | 2012-07-03 16:45:31 +0200 | [diff] [blame] | 2179 | } |
Dave Chinner | 74278da | 2015-03-04 12:37:22 -0500 | [diff] [blame] | 2180 | spin_unlock(&blockdev_superblock->s_inode_list_lock); |
Jan Kara | 5c0d6b6 | 2012-07-03 16:45:31 +0200 | [diff] [blame] | 2181 | iput(old_inode); |
| 2182 | } |
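| | |
| | /* |
| | * Example caller shape (a sketch modelled on the sync(2) path, which |
| | * walks every open block device to kick off and then wait for |
| | * writeback): |
| | * |
| | * static void fdatawrite_one_bdev(struct block_device *bdev, void *arg) |
| | * { |
| | * filemap_fdatawrite(bdev->bd_inode->i_mapping); |
| | * } |
| | * |
| | * iterate_bdevs(fdatawrite_one_bdev, NULL); |
| | */ |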