// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include "internal.h"

struct bdev_inode {
        struct block_device bdev;
        struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
        return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
        return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

static void bdev_write_inode(struct block_device *bdev)
{
        struct inode *inode = bdev->bd_inode;
        int ret;

        spin_lock(&inode->i_lock);
        while (inode->i_state & I_DIRTY) {
                spin_unlock(&inode->i_lock);
                ret = write_inode_now(inode, true);
                if (ret) {
                        char name[BDEVNAME_SIZE];
                        pr_warn_ratelimited("VFS: Dirty inode writeback failed "
                                            "for block device %s (err=%d).\n",
                                            bdevname(bdev, name), ret);
                }
                spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
}
/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
                return;

        invalidate_bh_lrus();
        truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages) {
                invalidate_bh_lrus();
                lru_add_drain_all();    /* make sure all lru add caches are flushed */
                invalidate_mapping_pages(mapping, 0, -1);
        }
        /* 99% of the time, we don't need to flush the cleancache on the bdev.
         * But, for the strange corners, let's be cautious.
         */
        cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);
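
/*
 * A hedged illustration (not taken from a real caller): the two helpers
 * above differ in what they may throw away.  invalidate_bdev() drops only
 * clean, unused pages, so it suits "the media may have changed" style
 * handling:
 *
 *      invalidate_bdev(bdev);          // stale clean cache is gone
 *
 * kill_bdev() discards dirty pages and buffers too, which is why
 * set_blocksize() below calls it: buffers created under the old block
 * size must not survive the switch.
 */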

/*
 * Drop all buffers & page cache for a given bdev range. This function bails
 * out with an error if the bdev has another exclusive owner (such as a
 * filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
                        loff_t lstart, loff_t lend)
{
        struct block_device *claimed_bdev = NULL;
        int err;

        /*
         * If we don't hold an exclusive handle for the device, upgrade to it
         * while we discard the buffer cache to avoid discarding buffers
         * under a live filesystem.
         */
        if (!(mode & FMODE_EXCL)) {
                claimed_bdev = bdev->bd_contains;
                err = bd_prepare_to_claim(bdev, claimed_bdev,
                                          truncate_bdev_range);
                if (err)
                        return err;
        }
        truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
        if (claimed_bdev)
                bd_abort_claiming(bdev, claimed_bdev, truncate_bdev_range);
        return 0;
}
EXPORT_SYMBOL(truncate_bdev_range);
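
/*
 * Illustrative sketch (assumed caller, not part of this file): a
 * fallocate-style zeroing path would first drop cached pages over the
 * range it is about to rewrite, then push the zeroes to the device:
 *
 *      static int blkdev_zero_range(struct block_device *bdev, fmode_t mode,
 *                                   loff_t start, loff_t len)
 *      {
 *              int err = truncate_bdev_range(bdev, mode, start,
 *                                            start + len - 1);
 *
 *              if (err)
 *                      return err;
 *              return blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
 *                                          GFP_KERNEL, 0);
 *      }
 *
 * blkdev_zero_range() is a hypothetical name; the pattern mirrors what
 * blkdev_fallocate() does with these helpers.
 */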

static void set_init_blocksize(struct block_device *bdev)
{
        bdev->bd_inode->i_blkbits = blksize_bits(bdev_logical_block_size(bdev));
}

int set_blocksize(struct block_device *bdev, int size)
{
        /* Size must be a power of two, and between 512 and PAGE_SIZE */
        if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
                return -EINVAL;

        /* Size cannot be smaller than the size supported by the device */
        if (size < bdev_logical_block_size(bdev))
                return -EINVAL;

        /* Don't change the size if it is the same as the current size */
        if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
                sync_blockdev(bdev);
                bdev->bd_inode->i_blkbits = blksize_bits(size);
                kill_bdev(bdev);
        }
        return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
        if (set_blocksize(sb->s_bdev, size))
                return 0;
        /* If we get here, we know size is a power of two
         * and its value is between 512 and PAGE_SIZE */
        sb->s_blocksize = size;
        sb->s_blocksize_bits = blksize_bits(size);
        return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
        int minsize = bdev_logical_block_size(sb->s_bdev);
        if (size < minsize)
                size = minsize;
        return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
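
/*
 * Illustrative sketch (assumed filesystem code, not from this file): a
 * fill_super implementation that prefers 1k blocks but must respect the
 * device's logical block size would start with:
 *
 *      blocksize = sb_min_blocksize(sb, 1024);
 *      if (!blocksize)
 *              return -EINVAL;
 *
 * On a device with 4096-byte logical blocks this yields 4096 rather than
 * 1024, because set_blocksize() rejects anything smaller than
 * bdev_logical_block_size().
 */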

static int
blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

static struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}

static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
        unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb->ki_flags & IOCB_DSYNC)
                op |= REQ_FUA;
        return op;
}
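
/*
 * A worked example of the flag above (illustrative, not extra code): an
 * O_DSYNC direct write would otherwise need a cache flush after
 * completion, and hence process context to issue it.  Tagging the bio
 * with REQ_FUA makes the device persist the data as part of the write
 * itself, so:
 *
 *      bio->bi_opf = dio_bio_write_op(iocb);
 *      // IOCB_DSYNC set:   REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_FUA
 *      // IOCB_DSYNC clear: REQ_OP_WRITE | REQ_SYNC | REQ_IDLE
 */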

#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
        struct task_struct *waiter = bio->bi_private;

        WRITE_ONCE(bio->bi_private, NULL);
        blk_wake_io_task(waiter);
}

static ssize_t
__blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                int nr_pages)
{
        struct file *file = iocb->ki_filp;
        struct block_device *bdev = I_BDEV(bdev_file_inode(file));
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;
        blk_qc_t qc;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        bio_init(&bio, vecs, nr_pages);
        bio_set_dev(&bio, bdev);
        bio.bi_iter.bi_sector = pos >> 9;
        bio.bi_write_hint = iocb->ki_hint;
        bio.bi_private = current;
        bio.bi_end_io = blkdev_bio_end_io_simple;
        bio.bi_ioprio = iocb->ki_ioprio;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == READ) {
                bio.bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter))
                        should_dirty = true;
        } else {
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);

        qc = submit_bio(&bio);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(bio.bi_private))
                        break;
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
                    !blk_poll(bdev_get_queue(bdev), qc, true))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);

        bio_uninit(&bio);

        return ret;
}
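
/*
 * A worked example of the alignment check above (illustrative numbers):
 * with a 512-byte logical block size, a 4096-byte direct read at offset
 * 512 passes, since both the file position and the user buffer alignment
 * have their low 9 bits clear; the same read at offset 100 fails with
 * -EINVAL before any bio is built.
 */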

struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        bool                    multi_bio : 1;
        bool                    should_dirty : 1;
        bool                    is_sync : 1;
        struct bio              bio;
};

static struct bio_set blkdev_dio_pool;

static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
{
        struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
        struct request_queue *q = bdev_get_queue(bdev);

        return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
}

static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->should_dirty;

        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
                if (!dio->is_sync) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret, 0);
                        if (dio->multi_bio)
                                bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

static ssize_t
__blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        blk_qc_t qc = BLK_QC_T_NONE;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);

        dio = container_of(bio, struct blkdev_dio, bio);
        dio->is_sync = is_sync = is_sync_kiocb(iocb);
        if (dio->is_sync) {
                dio->waiter = current;
                bio_get(bio);
        } else {
                dio->iocb = iocb;
        }

        dio->size = 0;
        dio->multi_bio = false;
        dio->should_dirty = is_read && iter_is_iovec(iter);

        /*
         * Don't plug for HIPRI/polled IO, as those should go straight
         * to issue.
         */
        if (!is_poll)
                blk_start_plug(&plug);

        for (;;) {
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = pos >> 9;
                bio->bi_write_hint = iocb->ki_hint;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }

                if (is_read) {
                        bio->bi_opf = REQ_OP_READ;
                        if (dio->should_dirty)
                                bio_set_pages_dirty(bio);
                } else {
                        bio->bi_opf = dio_bio_write_op(iocb);
                        task_io_account_write(bio->bi_iter.bi_size);
                }

                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
                if (!nr_pages) {
                        bool polled = false;

                        if (iocb->ki_flags & IOCB_HIPRI) {
                                bio_set_polled(bio, iocb);
                                polled = true;
                        }

                        qc = submit_bio(bio);

                        if (polled)
                                WRITE_ONCE(iocb->ki_cookie, qc);
                        break;
                }

                if (!dio->multi_bio) {
                        /*
                         * AIO needs an extra reference to ensure the dio
                         * structure which is embedded into the first bio
                         * stays around.
                         */
                        if (!is_sync)
                                bio_get(bio);
                        dio->multi_bio = true;
                        atomic_set(&dio->ref, 2);
                } else {
                        atomic_inc(&dio->ref);
                }

                submit_bio(bio);
                bio = bio_alloc(GFP_KERNEL, nr_pages);
        }

        if (!is_poll)
                blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;

                if (!(iocb->ki_flags & IOCB_HIPRI) ||
                    !blk_poll(bdev_get_queue(bdev), qc, true))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}

static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        int nr_pages;

        nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES + 1);
        if (!nr_pages)
                return 0;
        if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
                return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

        return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
}

static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                                offsetof(struct blkdev_dio, bio),
                                BIOSET_NEED_BVECS);
}
module_init(blkdev_init);

int __sync_blockdev(struct block_device *bdev, int wait)
{
        if (!bdev)
                return 0;
        if (!wait)
                return filemap_flush(bdev->bd_inode->i_mapping);
        return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
        return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this device:
 * filesystem data as well as the underlying block device.  Takes the
 * superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
        struct super_block *sb = get_super(bdev);
        if (sb) {
                int res = sync_filesystem(sb);
                drop_super(sb);
                return res;
        }
        return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:       blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when
 * multiple freeze requests arrive simultaneously.  It counts up in
 * freeze_bdev() and counts down in thaw_bdev(); when it becomes 0,
 * thaw_bdev() actually unfreezes the filesystem.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = 0;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (++bdev->bd_fsfreeze_count > 1) {
                /*
                 * We don't even need to grab a reference - the first call
                 * to freeze_bdev grabs an active reference and only the last
                 * thaw_bdev drops it.
                 */
                sb = get_super(bdev);
                if (sb)
                        drop_super(sb);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return sb;
        }

        sb = get_active_super(bdev);
        if (!sb)
                goto out;
        if (sb->s_op->freeze_super)
                error = sb->s_op->freeze_super(sb);
        else
                error = freeze_super(sb);
        if (error) {
                deactivate_super(sb);
                bdev->bd_fsfreeze_count--;
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return ERR_PTR(error);
        }
        deactivate_super(sb);
 out:
        sync_blockdev(bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return sb;      /* thaw_bdev releases s->s_umount */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:       blockdevice to unlock
 * @sb:         associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
        int error = -EINVAL;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (!bdev->bd_fsfreeze_count)
                goto out;

        error = 0;
        if (--bdev->bd_fsfreeze_count > 0)
                goto out;

        if (!sb)
                goto out;

        if (sb->s_op->thaw_super)
                error = sb->s_op->thaw_super(sb);
        else
                error = thaw_super(sb);
        if (error)
                bdev->bd_fsfreeze_count++;
out:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return error;
}
EXPORT_SYMBOL(thaw_bdev);
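
/*
 * Illustrative sketch (assumed caller, not from this file): a snapshot
 * implementation brackets its metadata copy with the pair above; the
 * bd_fsfreeze_count nesting makes concurrent freezers safe:
 *
 *      struct super_block *sb = freeze_bdev(bdev);
 *
 *      if (IS_ERR(sb))
 *              return PTR_ERR(sb);
 *      take_snapshot(bdev);            // hypothetical helper
 *      thaw_bdev(bdev, sb);
 *
 * freeze_bdev() returns NULL when no filesystem is mounted on the device;
 * passing that NULL back to thaw_bdev() merely decrements the freeze
 * count, which is the desired behaviour.
 */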
624
Linus Torvalds1da177e2005-04-16 15:20:36 -0700625static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
626{
627 return block_write_full_page(page, blkdev_get_block, wbc);
628}
629
630static int blkdev_readpage(struct file * file, struct page * page)
631{
632 return block_read_full_page(page, blkdev_get_block);
633}
634
Matthew Wilcox (Oracle)d4388342020-06-01 21:47:02 -0700635static void blkdev_readahead(struct readahead_control *rac)
Akinobu Mita447f05b2014-10-09 15:26:58 -0700636{
Matthew Wilcox (Oracle)d4388342020-06-01 21:47:02 -0700637 mpage_readahead(rac, blkdev_get_block);
Akinobu Mita447f05b2014-10-09 15:26:58 -0700638}
639
Nick Piggin6272b5a2007-10-16 01:25:04 -0700640static int blkdev_write_begin(struct file *file, struct address_space *mapping,
641 loff_t pos, unsigned len, unsigned flags,
642 struct page **pagep, void **fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643{
Christoph Hellwig155130a2010-06-04 11:29:58 +0200644 return block_write_begin(mapping, pos, len, flags, pagep,
645 blkdev_get_block);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646}
647
Nick Piggin6272b5a2007-10-16 01:25:04 -0700648static int blkdev_write_end(struct file *file, struct address_space *mapping,
649 loff_t pos, unsigned len, unsigned copied,
650 struct page *page, void *fsdata)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651{
Nick Piggin6272b5a2007-10-16 01:25:04 -0700652 int ret;
653 ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);
654
655 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +0300656 put_page(page);
Nick Piggin6272b5a2007-10-16 01:25:04 -0700657
658 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659}
660
661/*
662 * private llseek:
Al Viro496ad9a2013-01-23 17:07:38 -0500663 * for a block special file file_inode(file)->i_size is zero
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664 * so we compute the size by hand (just as in block_read/write above)
665 */
Andrew Morton965c8e52012-12-17 15:59:39 -0800666static loff_t block_llseek(struct file *file, loff_t offset, int whence)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700667{
Dan Williams4ebb16c2015-10-28 07:48:19 +0900668 struct inode *bd_inode = bdev_file_inode(file);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700669 loff_t retval;
670
Al Viro59551022016-01-22 15:40:57 -0500671 inode_lock(bd_inode);
Al Viro5d48f3a2013-06-23 21:34:45 +0400672 retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
Al Viro59551022016-01-22 15:40:57 -0500673 inode_unlock(bd_inode);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700674 return retval;
675}
676
Josef Bacik02c24a82011-07-16 20:44:56 -0400677int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678{
Dan Williams4ebb16c2015-10-28 07:48:19 +0900679 struct inode *bd_inode = bdev_file_inode(filp);
Anton Blanchardb8af67e2010-04-23 13:18:06 -0400680 struct block_device *bdev = I_BDEV(bd_inode);
Christoph Hellwigab0a9732009-10-29 14:14:04 +0100681 int error;
Rafael J. Wysockida5aa862011-08-02 02:17:48 +0200682
Jeff Layton372cf242017-07-06 07:02:28 -0400683 error = file_write_and_wait_range(filp, start, end);
Rafael J. Wysockida5aa862011-08-02 02:17:48 +0200684 if (error)
685 return error;
Christoph Hellwigab0a9732009-10-29 14:14:04 +0100686
Anton Blanchardb8af67e2010-04-23 13:18:06 -0400687 /*
688 * There is no need to serialise calls to blkdev_issue_flush with
689 * i_mutex and doing so causes performance issues with concurrent
690 * O_SYNC writers to a block device.
691 */
Christoph Hellwig93985542020-05-13 14:36:00 +0200692 error = blkdev_issue_flush(bdev, GFP_KERNEL);
Christoph Hellwigab0a9732009-10-29 14:14:04 +0100693 if (error == -EOPNOTSUPP)
694 error = 0;
Anton Blanchardb8af67e2010-04-23 13:18:06 -0400695
Christoph Hellwigab0a9732009-10-29 14:14:04 +0100696 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700697}
Andrew Mortonb1dd3b22010-04-06 14:35:00 -0700698EXPORT_SYMBOL(blkdev_fsync);

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page from (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", e.g. out of memory,
 * or queue full; callers should try a different route to read this page
 * rather than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
                        struct page *page)
{
        const struct block_device_operations *ops = bdev->bd_disk->fops;
        int result = -EOPNOTSUPP;

        if (!ops->rw_page || bdev_get_integrity(bdev))
                return result;

        result = blk_queue_enter(bdev->bd_disk->queue, 0);
        if (result)
                return result;
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
                              REQ_OP_READ);
        blk_queue_exit(bdev->bd_disk->queue);
        return result;
}
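
/*
 * Illustrative sketch (assumed caller, not from this file): because the
 * errors above are "soft", callers such as the swap-in path try this
 * rw_page fast path first and fall back to a regular bio on failure:
 *
 *      if (!bdev_read_page(bdev, sector, page))
 *              return 0;       // submitted (and possibly already done)
 *      // otherwise fall back to bio_alloc() + submit_bio() ...
 */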

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (e.g. the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", e.g. out of memory,
 * or queue full; callers should try a different route to write this page
 * rather than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
                        struct page *page, struct writeback_control *wbc)
{
        int result;
        const struct block_device_operations *ops = bdev->bd_disk->fops;

        if (!ops->rw_page || bdev_get_integrity(bdev))
                return -EOPNOTSUPP;
        result = blk_queue_enter(bdev->bd_disk->queue, 0);
        if (result)
                return result;

        set_page_writeback(page);
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
                              REQ_OP_WRITE);
        if (result) {
                end_page_writeback(page);
        } else {
                clean_page_buffers(page);
                unlock_page(page);
        }
        blk_queue_exit(bdev->bd_disk->queue);
        return result;
}
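
/*
 * Illustrative sketch (assumed caller, not from this file): the write
 * side mirrors the read side, remembering that on failure the page is
 * returned still locked and not under writeback:
 *
 *      ret = bdev_write_page(bdev, sector, page, wbc);
 *      if (ret)
 *              return fallback_writepage(page, wbc);   // hypothetical bio path
 *      return 0;
 */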

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
        struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
        kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *foo)
{
        struct bdev_inode *ei = (struct bdev_inode *) foo;
        struct block_device *bdev = &ei->bdev;

        memset(bdev, 0, sizeof(*bdev));
        mutex_init(&bdev->bd_mutex);
#ifdef CONFIG_SYSFS
        INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
        bdev->bd_bdi = &noop_backing_dev_info;
        inode_init_once(&ei->vfs_inode);
        /* Initialize mutex for freeze. */
        mutex_init(&bdev->bd_fsfreeze_mutex);
}

static void bdev_evict_inode(struct inode *inode)
{
        struct block_device *bdev = &BDEV_I(inode)->bdev;
        truncate_inode_pages_final(&inode->i_data);
        invalidate_inode_buffers(inode); /* is it needed here? */
        clear_inode(inode);
        /* Detach inode from wb early as bdi_put() may free bdi->wb */
        inode_detach_wb(inode);
        if (bdev->bd_bdi != &noop_backing_dev_info) {
                bdi_put(bdev->bd_bdi);
                bdev->bd_bdi = &noop_backing_dev_info;
        }
}

static const struct super_operations bdev_sops = {
        .statfs = simple_statfs,
        .alloc_inode = bdev_alloc_inode,
        .free_inode = bdev_free_inode,
        .drop_inode = generic_delete_inode,
        .evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
        struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
        if (!ctx)
                return -ENOMEM;
        fc->s_iflags |= SB_I_CGROUPWB;
        ctx->ops = &bdev_sops;
        return 0;
}

static struct file_system_type bd_type = {
        .name = "bdev",
        .init_fs_context = bd_init_fs_context,
        .kill_sb = kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
        int err;
        static struct vfsmount *bd_mnt;

        bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
                        0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                                SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
                        init_once);
        err = register_filesystem(&bd_type);
        if (err)
                panic("Cannot register bdev pseudo-fs");
        bd_mnt = kern_mount(&bd_type);
        if (IS_ERR(bd_mnt))
                panic("Cannot create bdev pseudo-fs");
        blockdev_superblock = bd_mnt->mnt_sb;   /* For writeback */
}

/*
 * Most likely a _very_ bad hash function - but then it's hardly critical
 * for a small /dev, and it can be fixed when somebody needs a really
 * large one.  Keep in mind that it will be fed through the icache hash
 * function too.
 */
static inline unsigned long hash(dev_t dev)
{
        return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
        return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
        BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
        return 0;
}

static struct block_device *bdget(dev_t dev)
{
        struct block_device *bdev;
        struct inode *inode;

        inode = iget5_locked(blockdev_superblock, hash(dev),
                        bdev_test, bdev_set, &dev);

        if (!inode)
                return NULL;

        bdev = &BDEV_I(inode)->bdev;

        if (inode->i_state & I_NEW) {
                spin_lock_init(&bdev->bd_size_lock);
                bdev->bd_contains = NULL;
                bdev->bd_super = NULL;
                bdev->bd_inode = inode;
                bdev->bd_part_count = 0;
                inode->i_mode = S_IFBLK;
                inode->i_rdev = dev;
                inode->i_bdev = bdev;
                inode->i_data.a_ops = &def_blk_aops;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                unlock_new_inode(inode);
        }
        return bdev;
}

/**
 * bdgrab -- Grab a reference to an already referenced block device
 * @bdev:       Block device to grab a reference to.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
        ihold(bdev->bd_inode);
        return bdev;
}
EXPORT_SYMBOL(bdgrab);
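
/*
 * Illustrative sketch (assumed caller, not from this file): bdgrab()
 * pairs with bdput(), defined below, much like other get/put helpers:
 *
 *      struct block_device *ref = bdgrab(bdev);  // caller already holds bdev
 *
 *      // ...use ref...
 *      bdput(ref);
 *
 * Unlike bdget(), bdgrab() cannot sleep or fail, because it only bumps
 * the inode reference count of a device that is already pinned.
 */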

struct block_device *bdget_part(struct hd_struct *part)
{
        return bdget(part_devt(part));
}

long nr_blockdev_pages(void)
{
        struct inode *inode;
        long ret = 0;

        spin_lock(&blockdev_superblock->s_inode_list_lock);
        list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
                ret += inode->i_mapping->nrpages;
        spin_unlock(&blockdev_superblock->s_inode_list_lock);

        return ret;
}

void bdput(struct block_device *bdev)
{
        iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

static struct block_device *bd_acquire(struct inode *inode)
{
        struct block_device *bdev;

        spin_lock(&bdev_lock);
        bdev = inode->i_bdev;
        if (bdev && !inode_unhashed(bdev->bd_inode)) {
                bdgrab(bdev);
                spin_unlock(&bdev_lock);
                return bdev;
        }
        spin_unlock(&bdev_lock);

        /*
         * i_bdev references a block device inode that was already shut down
         * (the corresponding device got removed).  Remove the reference and
         * look up the block device inode again, in case a new device got
         * re-established under the same device number.
         */
        if (bdev)
                bd_forget(inode);

        bdev = bdget(inode->i_rdev);
        if (bdev) {
                spin_lock(&bdev_lock);
                if (!inode->i_bdev) {
                        /*
                         * We take an additional reference to bd_inode,
                         * and it's released in clear_inode() of inode.
                         * So, we can access it via ->i_mapping always
                         * without igrab().
                         */
                        bdgrab(bdev);
                        inode->i_bdev = bdev;
                        inode->i_mapping = bdev->bd_inode->i_mapping;
                }
                spin_unlock(&bdev_lock);
        }
        return bdev;
}

/* Call when you free an inode */

void bd_forget(struct inode *inode)
{
        struct block_device *bdev = NULL;

        spin_lock(&bdev_lock);
        if (!sb_is_blkdev_sb(inode->i_sb))
                bdev = inode->i_bdev;
        inode->i_bdev = NULL;
        inode->i_mapping = &inode->i_data;
        spin_unlock(&bdev_lock);

        if (bdev)
                bdput(bdev);
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
                         void *holder)
{
        if (bdev->bd_holder == holder)
                return true;     /* already a holder */
        else if (bdev->bd_holder != NULL)
                return false;    /* held by someone else */
        else if (whole == bdev)
                return true;     /* is a whole device which isn't held */
        else if (whole->bd_holder == bd_may_claim)
                return true;     /* is a partition of a device that is being partitioned */
        else if (whole->bd_holder != NULL)
                return false;    /* is a partition of a held device */
        else
                return true;     /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @whole: the whole device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress.  On successful
 * return, the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
                void *holder)
{
retry:
        spin_lock(&bdev_lock);
        /* if someone else claimed, fail */
        if (!bd_may_claim(bdev, whole, holder)) {
                spin_unlock(&bdev_lock);
                return -EBUSY;
        }

        /* if claiming is already in progress, wait for it to finish */
        if (whole->bd_claiming) {
                wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
                DEFINE_WAIT(wait);

                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock(&bdev_lock);
                schedule();
                finish_wait(wq, &wait);
                goto retry;
        }

        /* yay, all mine */
        whole->bd_claiming = holder;
        spin_unlock(&bdev_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
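
/*
 * Illustrative sketch (assumed caller, not from this file): a temporary
 * claim that only needs to keep other exclusive openers out for a while
 * pairs bd_prepare_to_claim() with bd_abort_claiming(), the way
 * truncate_bdev_range() above does:
 *
 *      err = bd_prepare_to_claim(bdev, bdev->bd_contains, my_token);
 *      if (err)
 *              return err;             // -EBUSY: held by someone else
 *      do_exclusive_work(bdev);        // hypothetical helper
 *      bd_abort_claiming(bdev, bdev->bd_contains, my_token);
 *
 * A real exclusive open would instead complete the claim and set the
 * holder fields (see bd_finish_claiming() below).
 */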

static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
{
        struct gendisk *disk = get_gendisk(bdev->bd_dev, partno);

        if (!disk)
                return NULL;
        /*
         * Now that we hold a gendisk reference, we make sure the bdev we
         * looked up is not stale.  If it is, it means the device got removed
         * and created again before we looked up the gendisk, and we fail the
         * open in such a case.  Associating an unhashed bdev with a newly
         * created gendisk could lead to two bdevs (and thus two independent
         * caches) being associated with one device, which is bad.
         */
        if (inode_unhashed(bdev->bd_inode)) {
                put_disk_and_module(disk);
                return NULL;
        }
        return disk;
}

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
        lockdep_assert_held(&bdev_lock);
        /* tell others that we're done */
        BUG_ON(whole->bd_claiming != holder);
        whole->bd_claiming = NULL;
        wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @whole: whole block device
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device.  Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev,
                struct block_device *whole, void *holder)
{
        spin_lock(&bdev_lock);
        BUG_ON(!bd_may_claim(bdev, whole, holder));
        /*
         * Note that for a whole device bd_holders will be incremented twice,
         * and bd_holder will be set to bd_may_claim before being set to holder
         */
        whole->bd_holders++;
        whole->bd_holder = bd_may_claim;
        bdev->bd_holders++;
        bdev->bd_holder = holder;
        bd_clear_claiming(whole, holder);
        spin_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @whole: whole block device
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed.  This
 * can also be used when exclusive open is not actually desired and we just
 * needed to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
                void *holder)
{
        spin_lock(&bdev_lock);
        bd_clear_claiming(whole, holder);
        spin_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);

#ifdef CONFIG_SYSFS
struct bd_holder_disk {
        struct list_head        list;
        struct gendisk          *disk;
        int                     refcnt;
};

static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
                                                  struct gendisk *disk)
{
        struct bd_holder_disk *holder;

        list_for_each_entry(holder, &bdev->bd_holder_disks, list)
                if (holder->disk == disk)
                        return holder;
        return NULL;
}

static int add_symlink(struct kobject *from, struct kobject *to)
{
        return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
        sysfs_remove_link(from, kobject_name(to));
}

/**
 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * This function creates the following sysfs symlinks.
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 * passed to bd_link_disk_holder(), then:
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * The caller must have claimed @bdev before calling this function and
 * ensure that both @bdev and @disk are valid during the creation and
 * lifetime of these symlinks.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
        struct bd_holder_disk *holder;
        int ret = 0;

        mutex_lock(&bdev->bd_mutex);

        WARN_ON_ONCE(!bdev->bd_holder);

        /* FIXME: remove the following once add_disk() handles errors */
        if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
                goto out_unlock;

        holder = bd_find_holder_disk(bdev, disk);
        if (holder) {
                holder->refcnt++;
                goto out_unlock;
        }

        holder = kzalloc(sizeof(*holder), GFP_KERNEL);
        if (!holder) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        INIT_LIST_HEAD(&holder->list);
        holder->disk = disk;
        holder->refcnt = 1;

        ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
        if (ret)
                goto out_free;

        ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
        if (ret)
                goto out_del;
        /*
         * bdev could be deleted beneath us which would implicitly destroy
         * the holder directory.  Hold on to it.
         */
        kobject_get(bdev->bd_part->holder_dir);

        list_add(&holder->list, &bdev->bd_holder_disks);
        goto out_unlock;

out_del:
        del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
out_free:
        kfree(holder);
out_unlock:
        mutex_unlock(&bdev->bd_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);
Jun'ichi Nomura641dc632006-03-27 01:17:57 -08001277
Tejun Heo49731ba2011-01-14 18:43:57 +01001278/**
1279 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
1280 * @bdev: the claimed slave bdev
1281 * @disk: the holding disk
1282 *
1283 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
1284 *
1285 * CONTEXT:
1286 * Might sleep.
1287 */
1288void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
Jun'ichi Nomura641dc632006-03-27 01:17:57 -08001289{
Tejun Heo49731ba2011-01-14 18:43:57 +01001290 struct bd_holder_disk *holder;
Tejun Heoe09b4572010-11-13 11:55:17 +01001291
Tejun Heo49731ba2011-01-14 18:43:57 +01001292 mutex_lock(&bdev->bd_mutex);
Jun'ichi Nomura641dc632006-03-27 01:17:57 -08001293
Tejun Heo49731ba2011-01-14 18:43:57 +01001294 holder = bd_find_holder_disk(bdev, disk);
1295
1296 if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
1297 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
1298 del_symlink(bdev->bd_part->holder_dir,
1299 &disk_to_dev(disk)->kobj);
Tejun Heoe7407d12011-02-24 09:56:32 +01001300 kobject_put(bdev->bd_part->holder_dir);
Tejun Heo49731ba2011-01-14 18:43:57 +01001301 list_del_init(&holder->list);
1302 kfree(holder);
1303 }
1304
1305 mutex_unlock(&bdev->bd_mutex);
Jun'ichi Nomura641dc632006-03-27 01:17:57 -08001306}
Tejun Heo49731ba2011-01-14 18:43:57 +01001307EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
Jun'ichi Nomura641dc632006-03-27 01:17:57 -08001308#endif
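
/*
 * Illustrative sketch (not part of this file): a stacking driver such
 * as dm or md, which already holds an exclusive claim on the component
 * device, would typically pair the two helpers above as follows.  The
 * function names here are hypothetical:
 *
 *	static int example_attach_component(struct gendisk *stack_disk,
 *					    struct block_device *component)
 *	{
 *		return bd_link_disk_holder(component, stack_disk);
 *	}
 *
 *	static void example_detach_component(struct gendisk *stack_disk,
 *					     struct block_device *component)
 *	{
 *		bd_unlink_disk_holder(component, stack_disk);
 *	}
 */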
1309
Andrew Patterson0c002c22008-09-04 14:27:20 -06001310/**
Randy Dunlap57d1b532008-10-09 10:42:38 +02001311 * check_disk_size_change - checks for disk size change and adjusts bdev size.
Andrew Pattersonc3279d12008-09-04 14:27:25 -06001312 * @disk: struct gendisk to check
1313 * @bdev: struct bdev to adjust.
Christoph Hellwig5afb7832018-05-29 16:42:59 +02001314 * @verbose: if %true, log a message about any size change
Andrew Pattersonc3279d12008-09-04 14:27:25 -06001315 *
1316 * This routine checks whether the bdev size matches the disk size
shunki-fujita849cf552018-04-05 16:20:07 -07001317 * and adjusts it if it differs. When the bdev size is shrunk, all its
1318 * caches are freed.
Andrew Pattersonc3279d12008-09-04 14:27:25 -06001319 */
Christoph Hellwiga1548b62019-11-14 15:34:34 +01001320static void check_disk_size_change(struct gendisk *disk,
1321 struct block_device *bdev, bool verbose)
Andrew Pattersonc3279d12008-09-04 14:27:25 -06001322{
1323 loff_t disk_size, bdev_size;
1324
Christoph Hellwigc2b4bb82020-08-23 11:10:42 +02001325 spin_lock(&bdev->bd_size_lock);
Andrew Pattersonc3279d12008-09-04 14:27:25 -06001326 disk_size = (loff_t)get_capacity(disk) << 9;
1327 bdev_size = i_size_read(bdev->bd_inode);
1328 if (disk_size != bdev_size) {
Christoph Hellwig5afb7832018-05-29 16:42:59 +02001329 if (verbose) {
1330 printk(KERN_INFO
1331 "%s: detected capacity change from %lld to %lld\n",
1332 disk->disk_name, bdev_size, disk_size);
1333 }
Andrew Pattersonc3279d12008-09-04 14:27:25 -06001334 i_size_write(bdev->bd_inode, disk_size);
Christoph Hellwigc2b4bb82020-08-23 11:10:42 +02001335 }
Christoph Hellwigc2b4bb82020-08-23 11:10:42 +02001336 spin_unlock(&bdev->bd_size_lock);
1337
1338 if (bdev_size > disk_size) {
1339 if (__invalidate_device(bdev, false))
Christoph Hellwig9a3ffbb2020-07-08 14:25:43 +02001340 pr_warn("VFS: busy inodes on resized disk %s\n",
1341 disk->disk_name);
Andrew Pattersonc3279d12008-09-04 14:27:25 -06001342 }
1343}
Andrew Pattersonc3279d12008-09-04 14:27:25 -06001344
1345/**
Christoph Hellwig659e56b2020-09-01 17:57:43 +02001346 * revalidate_disk_size - checks for disk size change and adjusts bdev size.
1347 * @disk: struct gendisk to check
1348 * @verbose: if %true, log a message about any size change
1349 *
1350 * This routine checks whether the bdev size matches the disk size
1351 * and adjusts it if it differs. When the bdev size is shrunk, all its
1352 * caches are freed.
1353 */
1354void revalidate_disk_size(struct gendisk *disk, bool verbose)
1355{
1356 struct block_device *bdev;
1357
1358 /*
1359 * Hidden disks don't have an associated bdev so there's no point in
1360 * revalidating them.
1361 */
1362 if (disk->flags & GENHD_FL_HIDDEN)
1363 return;
1364
1365 bdev = bdget_disk(disk, 0);
1366 if (bdev) {
1367 check_disk_size_change(disk, bdev, verbose);
1368 bdput(bdev);
1369 }
1370}
1371EXPORT_SYMBOL(revalidate_disk_size);
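
/*
 * Illustrative sketch (hypothetical driver code): a driver that has
 * just learned about a new device capacity updates the gendisk first
 * and then lets the bdev inode size follow:
 *
 *	set_capacity(disk, new_nr_sectors);
 *	revalidate_disk_size(disk, true);
 *
 * Passing @verbose=true logs the detected capacity change.
 */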
1372
Christoph Hellwig611bee52020-08-23 11:10:41 +02001373void bd_set_nr_sectors(struct block_device *bdev, sector_t sectors)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374{
Christoph Hellwigc2b4bb82020-08-23 11:10:42 +02001375 spin_lock(&bdev->bd_size_lock);
Christoph Hellwig611bee52020-08-23 11:10:41 +02001376 i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
Christoph Hellwigc2b4bb82020-08-23 11:10:42 +02001377 spin_unlock(&bdev->bd_size_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378}
Christoph Hellwig611bee52020-08-23 11:10:41 +02001379EXPORT_SYMBOL(bd_set_nr_sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380
Al Viro4385bab2013-05-05 22:11:03 -04001381static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
NeilBrown37be4122006-12-08 02:36:16 -08001382
Christoph Hellwig142fe8f2019-11-14 15:34:35 +01001383int bdev_disk_changed(struct block_device *bdev, bool invalidate)
Christoph Hellwiga1548b62019-11-14 15:34:34 +01001384{
Christoph Hellwig142fe8f2019-11-14 15:34:35 +01001385 struct gendisk *disk = bdev->bd_disk;
Christoph Hellwiga1548b62019-11-14 15:34:34 +01001386 int ret;
1387
Christoph Hellwigf0b870d2019-11-14 15:34:36 +01001388 lockdep_assert_held(&bdev->bd_mutex);
1389
Christoph Hellwig38430f02020-09-21 09:19:45 +02001390 clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
Christoph Hellwig6540fbf2020-09-01 17:57:41 +02001391
Christoph Hellwiga1548b62019-11-14 15:34:34 +01001392rescan:
Christoph Hellwigd46430b2020-04-14 09:28:57 +02001393 ret = blk_drop_partitions(bdev);
Christoph Hellwiga1548b62019-11-14 15:34:34 +01001394 if (ret)
1395 return ret;
1396
Christoph Hellwigd981cb52020-03-18 09:12:06 +01001397 /*
1398 * Historically we only set the capacity to zero for devices that
1399 * support partitions (independent of actually having partitions created).
1400 * Doing that is rather inconsistent, but changing it broke legacy
1401 * udisks polling for legacy ide-cdrom devices. Use the crude check
1402 * below to get the sane behavior for most devices while not breaking
1403 * userspace for this particular setup.
1404 */
1405 if (invalidate) {
1406 if (disk_part_scan_enabled(disk) ||
1407 !(disk->flags & GENHD_FL_REMOVABLE))
1408 set_capacity(disk, 0);
1409 } else {
1410 if (disk->fops->revalidate_disk)
1411 disk->fops->revalidate_disk(disk);
1412 }
Christoph Hellwiga1548b62019-11-14 15:34:34 +01001413
1414 check_disk_size_change(disk, bdev, !invalidate);
Christoph Hellwiga1548b62019-11-14 15:34:34 +01001415
Christoph Hellwig142fe8f2019-11-14 15:34:35 +01001416 if (get_capacity(disk)) {
1417 ret = blk_add_partitions(disk, bdev);
1418 if (ret == -EAGAIN)
1419 goto rescan;
Eric Biggers490547c2019-12-02 10:21:34 -08001420 } else if (invalidate) {
Christoph Hellwiga1548b62019-11-14 15:34:34 +01001421 /*
1422 * Tell userspace that the media / partition table may have
1423 * changed.
1424 */
1425 kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
Christoph Hellwiga1548b62019-11-14 15:34:34 +01001426 }
1427
Christoph Hellwiga1548b62019-11-14 15:34:34 +01001428 return ret;
1429}
Christoph Hellwigf0b870d2019-11-14 15:34:36 +01001430/*
1431 * Only exported for the loop and dasd drivers for historic reasons. Don't
1432 * use in new code!
1432 * code!
1433 */
1434EXPORT_SYMBOL_GPL(bdev_disk_changed);
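
/*
 * Illustrative sketch (hypothetical code, modelled on the loop
 * driver): callers rescan a device after changing its backing state
 * while holding bd_mutex, as asserted above:
 *
 *	mutex_lock(&bdev->bd_mutex);
 *	ret = bdev_disk_changed(bdev, false);
 *	mutex_unlock(&bdev->bd_mutex);
 */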
Christoph Hellwiga1548b62019-11-14 15:34:34 +01001435
Peter Zijlstra6d740cd2007-02-20 13:58:18 -08001436/*
1437 * bd_mutex locking:
1438 *
1439 * mutex_lock(part->bd_mutex)
1440 * mutex_lock_nested(whole->bd_mutex, 1)
1441 */
1442
Christoph Hellwig5b642d8b2020-07-16 16:33:10 +02001443static int __blkdev_get(struct block_device *bdev, fmode_t mode, void *holder,
1444 int for_part)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445{
Christoph Hellwig5b642d8b2020-07-16 16:33:10 +02001446 struct block_device *whole = NULL, *claiming = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447 struct gendisk *disk;
Pavel Emelyanov7db9cfd2008-06-05 22:46:27 -07001448 int ret;
Tejun Heocf771cb2008-09-03 09:01:09 +02001449 int partno;
Christoph Hellwig5b642d8b2020-07-16 16:33:10 +02001450 bool first_open = false, unblock_events = true, need_restart;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451
NeilBrownd3374822009-01-09 08:31:10 +11001452 restart:
Christoph Hellwigc5638ab2020-07-16 16:33:07 +02001453 need_restart = false;
Tejun Heo89f97492008-11-05 10:21:06 +01001454 ret = -ENXIO;
Jan Kara560e7cb2018-02-26 13:01:42 +01001455 disk = bdev_get_gendisk(bdev, &partno);
Tejun Heo0762b8b2008-08-25 19:56:12 +09001456 if (!disk)
Arnd Bergmann6e9624b2010-08-07 18:25:34 +02001457 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458
Christoph Hellwig5b642d8b2020-07-16 16:33:10 +02001459 if (partno) {
1460 whole = bdget_disk(disk, 0);
1461 if (!whole) {
1462 ret = -ENOMEM;
1463 goto out_put_disk;
1464 }
1465 }
1466
1467 if (!for_part && (mode & FMODE_EXCL)) {
1468 WARN_ON_ONCE(!holder);
1469 if (whole)
1470 claiming = whole;
1471 else
1472 claiming = bdev;
1473 ret = bd_prepare_to_claim(bdev, claiming, holder);
1474 if (ret)
1475 goto out_put_whole;
1476 }
1477
Tejun Heo69e02c52011-03-09 19:54:27 +01001478 disk_block_events(disk);
NeilBrown6796bf52006-12-08 02:36:16 -08001479 mutex_lock_nested(&bdev->bd_mutex, for_part);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 if (!bdev->bd_openers) {
Jan Kara89736652018-02-26 13:01:40 +01001481 first_open = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 bdev->bd_disk = disk;
1483 bdev->bd_contains = bdev;
Christoph Hellwigc2ee0702017-08-23 19:10:31 +02001484 bdev->bd_partno = partno;
Dan Williams03cdadb2016-02-26 15:19:43 -08001485
Tejun Heocf771cb2008-09-03 09:01:09 +02001486 if (!partno) {
Tejun Heo89f97492008-11-05 10:21:06 +01001487 ret = -ENXIO;
1488 bdev->bd_part = disk_get_part(disk, partno);
1489 if (!bdev->bd_part)
1490 goto out_clear;
1491
Tejun Heo1196f8b2011-04-21 20:54:45 +02001492 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 if (disk->fops->open) {
Al Viro572c4892007-10-08 13:24:05 -04001494 ret = disk->fops->open(bdev, mode);
Christoph Hellwigc5638ab2020-07-16 16:33:07 +02001495 /*
1496 * If we lost a race with 'disk' being deleted,
1497 * try again. See md.c
1498 */
1499 if (ret == -ERESTARTSYS)
1500 need_restart = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 }
Tejun Heo7e697232011-05-23 13:26:07 +02001502
Jan Kara04906b22019-01-14 09:48:10 +01001503 if (!ret) {
Christoph Hellwig611bee52020-08-23 11:10:41 +02001504 bd_set_nr_sectors(bdev, get_capacity(disk));
Jan Kara04906b22019-01-14 09:48:10 +01001505 set_init_blocksize(bdev);
1506 }
Tejun Heo7e697232011-05-23 13:26:07 +02001507
Tejun Heo1196f8b2011-04-21 20:54:45 +02001508 /*
1509 * If the device is invalidated, rescan partitions
1510 * if open succeeded or failed with -ENOMEDIUM.
1511 * The latter is necessary to prevent ghost
1512 * partitions on a removed medium.
1513 */
Christoph Hellwig38430f02020-09-21 09:19:45 +02001514 if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
Jan Kara731dc482019-10-21 10:37:59 +02001515 (!ret || ret == -ENOMEDIUM))
1516 bdev_disk_changed(bdev, ret == -ENOMEDIUM);
Dan Williams5a023cd2015-11-30 10:20:29 -08001517
Tejun Heo1196f8b2011-04-21 20:54:45 +02001518 if (ret)
1519 goto out_clear;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 } else {
NeilBrown37be4122006-12-08 02:36:16 -08001521 BUG_ON(for_part);
Christoph Hellwig5b642d8b2020-07-16 16:33:10 +02001522 ret = __blkdev_get(whole, mode, NULL, 1);
1523 if (ret)
Tejun Heo0762b8b2008-08-25 19:56:12 +09001524 goto out_clear;
Christoph Hellwig5b642d8b2020-07-16 16:33:10 +02001525 bdev->bd_contains = bdgrab(whole);
Tejun Heo89f97492008-11-05 10:21:06 +01001526 bdev->bd_part = disk_get_part(disk, partno);
Tejun Heoe71bf0d2008-09-03 09:03:02 +02001527 if (!(disk->flags & GENHD_FL_UP) ||
Tejun Heo89f97492008-11-05 10:21:06 +01001528 !bdev->bd_part || !bdev->bd_part->nr_sects) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 ret = -ENXIO;
Tejun Heo0762b8b2008-08-25 19:56:12 +09001530 goto out_clear;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531 }
Christoph Hellwig611bee52020-08-23 11:10:41 +02001532 bd_set_nr_sectors(bdev, bdev->bd_part->nr_sects);
Jan Kara04906b22019-01-14 09:48:10 +01001533 set_init_blocksize(bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 }
Jan Kara03e26272017-03-23 01:36:53 +01001535
1536 if (bdev->bd_bdi == &noop_backing_dev_info)
1537 bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 if (bdev->bd_contains == bdev) {
Tejun Heo1196f8b2011-04-21 20:54:45 +02001540 ret = 0;
1541 if (bdev->bd_disk->fops->open)
Al Viro572c4892007-10-08 13:24:05 -04001542 ret = bdev->bd_disk->fops->open(bdev, mode);
Tejun Heo1196f8b2011-04-21 20:54:45 +02001543 /* same as the first opener case, see the comment there */
Christoph Hellwig38430f02020-09-21 09:19:45 +02001544 if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
Jan Kara731dc482019-10-21 10:37:59 +02001545 (!ret || ret == -ENOMEDIUM))
1546 bdev_disk_changed(bdev, ret == -ENOMEDIUM);
Tejun Heo1196f8b2011-04-21 20:54:45 +02001547 if (ret)
1548 goto out_unlock_bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 }
1550 }
1551 bdev->bd_openers++;
NeilBrown37be4122006-12-08 02:36:16 -08001552 if (for_part)
1553 bdev->bd_part_count++;
Christoph Hellwig5b642d8b2020-07-16 16:33:10 +02001554 if (claiming)
1555 bd_finish_claiming(bdev, claiming, holder);
1556
1557 /*
1558 * Block event polling for write claims if requested. Any write holder
1559 * makes the write_holder state stick until all are released. This is
1560 * good enough and tracking individual writeable references is too
1561 * fragile given the way @mode is used in blkdev_get/put().
1562 */
1563 if (claiming && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
1564 (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
1565 bdev->bd_write_holder = true;
1566 unblock_events = false;
1567 }
Arjan van de Venc039e312006-03-23 03:00:28 -08001568 mutex_unlock(&bdev->bd_mutex);
Christoph Hellwig5b642d8b2020-07-16 16:33:10 +02001569
1570 if (unblock_events)
1571 disk_unblock_events(disk);
1572
Jan Kara89736652018-02-26 13:01:40 +01001573 /* only one opener holds refs to the module and disk */
1574 if (!first_open)
1575 put_disk_and_module(disk);
Christoph Hellwig5b642d8b2020-07-16 16:33:10 +02001576 if (whole)
1577 bdput(whole);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 return 0;
1579
Tejun Heo0762b8b2008-08-25 19:56:12 +09001580 out_clear:
Tejun Heo89f97492008-11-05 10:21:06 +01001581 disk_put_part(bdev->bd_part);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 bdev->bd_disk = NULL;
Tejun Heo0762b8b2008-08-25 19:56:12 +09001583 bdev->bd_part = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 if (bdev != bdev->bd_contains)
Al Viro572c4892007-10-08 13:24:05 -04001585 __blkdev_put(bdev->bd_contains, mode, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 bdev->bd_contains = NULL;
Tejun Heo0762b8b2008-08-25 19:56:12 +09001587 out_unlock_bdev:
Christoph Hellwig5b642d8b2020-07-16 16:33:10 +02001588 if (claiming)
1589 bd_abort_claiming(bdev, claiming, holder);
Arjan van de Venc039e312006-03-23 03:00:28 -08001590 mutex_unlock(&bdev->bd_mutex);
Tejun Heo69e02c52011-03-09 19:54:27 +01001591 disk_unblock_events(disk);
Christoph Hellwig5b642d8b2020-07-16 16:33:10 +02001592 out_put_whole:
1593 if (whole)
1594 bdput(whole);
1595 out_put_disk:
Jan Kara9df6c292018-02-26 13:01:39 +01001596 put_disk_and_module(disk);
Christoph Hellwigc5638ab2020-07-16 16:33:07 +02001597 if (need_restart)
1598 goto restart;
Dan Carpenter4345cab2011-03-19 13:53:31 +01001599 out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 return ret;
1601}
1602
Tejun Heod4d77622010-11-13 11:55:18 +01001603/**
1604 * blkdev_get - open a block device
1605 * @bdev: block_device to open
1606 * @mode: FMODE_* mask
1607 * @holder: exclusive holder identifier
1608 *
1609 * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is
1610 * opened with exclusive access. Specifying %FMODE_EXCL with %NULL
1611 * @holder is invalid. Exclusive opens may nest for the same @holder.
1612 *
1613 * On success, the reference count of @bdev is unchanged. On failure,
1614 * @bdev is put.
1615 *
1616 * CONTEXT:
1617 * Might sleep.
1618 *
1619 * RETURNS:
1620 * 0 on success, -errno on failure.
1621 */
Christoph Hellwig1fb1a2a2020-09-21 09:19:58 +02001622static int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623{
Christoph Hellwige5c7fb42020-08-31 20:02:36 +02001624 int ret, perm = 0;
Tejun Heoe525fd82010-11-13 11:55:17 +01001625
Christoph Hellwige5c7fb42020-08-31 20:02:36 +02001626 if (mode & FMODE_READ)
1627 perm |= MAY_READ;
1628 if (mode & FMODE_WRITE)
1629 perm |= MAY_WRITE;
1630 ret = devcgroup_inode_permission(bdev->bd_inode, perm);
1631 if (ret)
1632 goto bdput;
1633
1634	ret = __blkdev_get(bdev, mode, holder, 0);
1635 if (ret)
1636 goto bdput;
1637 return 0;
1638
1639bdput:
1640 bdput(bdev);
1641 return ret;
NeilBrown37be4122006-12-08 02:36:16 -08001642}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643
Tejun Heod4d77622010-11-13 11:55:18 +01001644/**
1645 * blkdev_get_by_path - open a block device by name
1646 * @path: path to the block device to open
1647 * @mode: FMODE_* mask
1648 * @holder: exclusive holder identifier
1649 *
1650 * Open the blockdevice described by the device file at @path. @mode
1651 * and @holder are identical to blkdev_get().
1652 *
1653 * On success, the returned block_device has reference count of one.
1654 *
1655 * CONTEXT:
1656 * Might sleep.
1657 *
1658 * RETURNS:
1659 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1660 */
1661struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
1662 void *holder)
1663{
1664 struct block_device *bdev;
1665 int err;
1666
1667 bdev = lookup_bdev(path);
1668 if (IS_ERR(bdev))
1669 return bdev;
1670
1671 err = blkdev_get(bdev, mode, holder);
1672 if (err)
1673 return ERR_PTR(err);
1674
Chuck Ebberte51900f2011-02-16 18:11:53 -05001675 if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
1676 blkdev_put(bdev, mode);
1677 return ERR_PTR(-EACCES);
1678 }
1679
Tejun Heod4d77622010-11-13 11:55:18 +01001680 return bdev;
1681}
1682EXPORT_SYMBOL(blkdev_get_by_path);
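
/*
 * Illustrative sketch (hypothetical filesystem code): open a backing
 * device exclusively and release it again; the holder cookie just has
 * to be a stable pointer identifying the claim, here assumed to be the
 * super_block:
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_path("/dev/vda1",
 *				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, sb);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */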
1683
1684/**
1685 * blkdev_get_by_dev - open a block device by device number
1686 * @dev: device number of block device to open
1687 * @mode: FMODE_* mask
1688 * @holder: exclusive holder identifier
1689 *
1690 * Open the blockdevice described by device number @dev. @mode and
1691 * @holder are identical to blkdev_get().
1692 *
1693 * Use it ONLY if you really do not have anything better - i.e. when
1694 * you are behind a truly sucky interface and all you are given is a
1695 * device number. _Never_ to be used for internal purposes. If you
1696 * ever need it - reconsider your API.
1697 *
1698 * On success, the returned block_device has reference count of one.
1699 *
1700 * CONTEXT:
1701 * Might sleep.
1702 *
1703 * RETURNS:
1704 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1705 */
1706struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
1707{
1708 struct block_device *bdev;
1709 int err;
1710
1711 bdev = bdget(dev);
1712 if (!bdev)
1713 return ERR_PTR(-ENOMEM);
1714
1715 err = blkdev_get(bdev, mode, holder);
1716 if (err)
1717 return ERR_PTR(err);
1718
1719 return bdev;
1720}
1721EXPORT_SYMBOL(blkdev_get_by_dev);
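
/*
 * Illustrative sketch (hypothetical code): when all that is known is a
 * dev_t, e.g. MKDEV(8, 0) for /dev/sda, the device can still be opened
 * non-exclusively, but heed the warning above:
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_dev(MKDEV(8, 0), FMODE_READ, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 */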
1722
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723static int blkdev_open(struct inode * inode, struct file * filp)
1724{
1725 struct block_device *bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726
1727 /*
1728 * Preserve backwards compatibility and allow large file access
1729 * even if userspace doesn't ask for it explicitly. Some mkfs
1730 * binaries need it. We might want to drop this workaround
1731 * during an unstable branch.
1732 */
1733 filp->f_flags |= O_LARGEFILE;
1734
Jens Axboea304f072020-05-22 09:14:08 -06001735 filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
Christoph Hellwigc35fc7a2017-08-29 16:13:21 +02001736
Al Viro572c4892007-10-08 13:24:05 -04001737 if (filp->f_flags & O_NDELAY)
1738 filp->f_mode |= FMODE_NDELAY;
1739 if (filp->f_flags & O_EXCL)
1740 filp->f_mode |= FMODE_EXCL;
1741 if ((filp->f_flags & O_ACCMODE) == 3)
1742 filp->f_mode |= FMODE_WRITE_IOCTL;
1743
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744 bdev = bd_acquire(inode);
Pavel Emelianov6a2aae02006-10-28 10:38:33 -07001745 if (bdev == NULL)
1746 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747
Al Viro572c4892007-10-08 13:24:05 -04001748 filp->f_mapping = bdev->bd_inode->i_mapping;
Jeff Layton5660e132017-07-06 07:02:25 -04001749 filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
Al Viro572c4892007-10-08 13:24:05 -04001750
Tejun Heoe525fd82010-11-13 11:55:17 +01001751 return blkdev_get(bdev, filp->f_mode, filp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752}
1753
Al Viro4385bab2013-05-05 22:11:03 -04001754static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001755{
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001756 struct gendisk *disk = bdev->bd_disk;
NeilBrown37be4122006-12-08 02:36:16 -08001757 struct block_device *victim = NULL;
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001758
Douglas Andersonb849dd82020-03-24 14:48:27 -07001759 /*
1760 * Sync early if it looks like we're the last one. If someone else
1761 * opens the block device between now and the decrement of bd_openers
1762 * then we did a sync that we didn't need to, but that's not the end
1763 * of the world and we want to avoid long (could be several minutes)
1764 * syncs while holding the mutex.
1765 */
1766 if (bdev->bd_openers == 1)
1767 sync_blockdev(bdev);
1768
NeilBrown6796bf52006-12-08 02:36:16 -08001769 mutex_lock_nested(&bdev->bd_mutex, for_part);
NeilBrown37be4122006-12-08 02:36:16 -08001770 if (for_part)
1771 bdev->bd_part_count--;
1772
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001773 if (!--bdev->bd_openers) {
Tejun Heo6a027ef2010-11-13 11:55:17 +01001774 WARN_ON_ONCE(bdev->bd_holders);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001775 sync_blockdev(bdev);
1776 kill_bdev(bdev);
Ilya Dryomov43d1c0e2015-11-20 22:22:34 +01001777
Vivek Goyaldbd3ca52015-11-09 09:23:40 -07001778 bdev_write_inode(bdev);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001779 }
1780 if (bdev->bd_contains == bdev) {
1781 if (disk->fops->release)
Al Virodb2a1442013-05-05 21:52:57 -04001782 disk->fops->release(disk, mode);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001783 }
1784 if (!bdev->bd_openers) {
Tejun Heo0762b8b2008-08-25 19:56:12 +09001785 disk_put_part(bdev->bd_part);
1786 bdev->bd_part = NULL;
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001787 bdev->bd_disk = NULL;
NeilBrown37be4122006-12-08 02:36:16 -08001788 if (bdev != bdev->bd_contains)
1789 victim = bdev->bd_contains;
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001790 bdev->bd_contains = NULL;
Tejun Heo523e1d32011-10-19 14:31:07 +02001791
Jan Kara9df6c292018-02-26 13:01:39 +01001792 put_disk_and_module(disk);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001793 }
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001794 mutex_unlock(&bdev->bd_mutex);
1795 bdput(bdev);
NeilBrown37be4122006-12-08 02:36:16 -08001796 if (victim)
Al Viro9a1c3542008-02-22 20:40:24 -05001797 __blkdev_put(victim, mode, 1);
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001798}
1799
Al Viro4385bab2013-05-05 22:11:03 -04001800void blkdev_put(struct block_device *bdev, fmode_t mode)
NeilBrown37be4122006-12-08 02:36:16 -08001801{
Tejun Heo85ef06d2011-07-01 16:17:47 +02001802 mutex_lock(&bdev->bd_mutex);
1803
Tejun Heoe525fd82010-11-13 11:55:17 +01001804 if (mode & FMODE_EXCL) {
Tejun Heo6a027ef2010-11-13 11:55:17 +01001805 bool bdev_free;
1806
1807 /*
1808 * Release a claim on the device. The holder fields
1809 * are protected with bdev_lock. bd_mutex is to
1810 * synchronize disk_holder unlinking.
1811 */
Tejun Heo6a027ef2010-11-13 11:55:17 +01001812 spin_lock(&bdev_lock);
1813
1814 WARN_ON_ONCE(--bdev->bd_holders < 0);
1815 WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
1816
1817 /* bd_contains might point to self, check in a separate step */
1818 if ((bdev_free = !bdev->bd_holders))
1819 bdev->bd_holder = NULL;
1820 if (!bdev->bd_contains->bd_holders)
1821 bdev->bd_contains->bd_holder = NULL;
1822
1823 spin_unlock(&bdev_lock);
1824
Tejun Heo77ea8872010-12-08 20:57:37 +01001825 /*
1826 * If this was the last claim, remove holder link and
1827 * unblock event polling if it was a write holder.
1828 */
Tejun Heo85ef06d2011-07-01 16:17:47 +02001829 if (bdev_free && bdev->bd_write_holder) {
1830 disk_unblock_events(bdev->bd_disk);
1831 bdev->bd_write_holder = false;
Tejun Heo77ea8872010-12-08 20:57:37 +01001832 }
Tejun Heo69362172011-03-09 19:54:27 +01001833 }
Tejun Heo77ea8872010-12-08 20:57:37 +01001834
Tejun Heo85ef06d2011-07-01 16:17:47 +02001835 /*
1836 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
1837 * event. This is to ensure detection of media removal commanded
1838 * from userland - e.g. eject(1).
1839 */
1840 disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);
1841
1842 mutex_unlock(&bdev->bd_mutex);
1843
Al Viro4385bab2013-05-05 22:11:03 -04001844 __blkdev_put(bdev, mode, 0);
NeilBrown37be4122006-12-08 02:36:16 -08001845}
Peter Zijlstra2e7b6512006-12-08 02:36:13 -08001846EXPORT_SYMBOL(blkdev_put);
1847
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848static int blkdev_close(struct inode * inode, struct file * filp)
1849{
Dan Williams4ebb16c2015-10-28 07:48:19 +09001850 struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
Al Viro4385bab2013-05-05 22:11:03 -04001851 blkdev_put(bdev, filp->f_mode);
1852 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853}
1854
Arnd Bergmannbb93e3a2005-06-23 00:10:15 -07001855static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856{
Dan Williams4ebb16c2015-10-28 07:48:19 +09001857 struct block_device *bdev = I_BDEV(bdev_file_inode(file));
Al Viro56b26ad2008-09-19 03:17:36 -04001858 fmode_t mode = file->f_mode;
Christoph Hellwigfd4ce1a2008-11-05 14:58:42 +01001859
1860 /*
1861 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
1862 * to update it before every ioctl.
1863 */
Al Viro56b26ad2008-09-19 03:17:36 -04001864 if (file->f_flags & O_NDELAY)
Christoph Hellwigfd4ce1a2008-11-05 14:58:42 +01001865 mode |= FMODE_NDELAY;
1866 else
1867 mode &= ~FMODE_NDELAY;
1868
Al Viro56b26ad2008-09-19 03:17:36 -04001869 return blkdev_ioctl(bdev, mode, cmd, arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870}
1871
Theodore Ts'o87d8fe12009-01-03 09:47:09 -05001872/*
Christoph Hellwigeef99382009-08-20 17:43:41 +02001873 * Write data to the block device. Only intended for the block device itself
1874 * and the raw driver which basically is a fake block device.
1875 *
1876 * Does not take i_mutex for the write and thus is not for general purpose
1877 * use.
1878 */
Al Viro1456c0a2014-04-03 03:21:50 -04001879ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
Christoph Hellwigeef99382009-08-20 17:43:41 +02001880{
1881 struct file *file = iocb->ki_filp;
Dan Williams4ebb16c2015-10-28 07:48:19 +09001882 struct inode *bd_inode = bdev_file_inode(file);
Al Viro7ec7b942015-04-07 11:35:14 -04001883 loff_t size = i_size_read(bd_inode);
Jianpeng Ma53362a02012-08-02 09:50:39 +02001884 struct blk_plug plug;
Christoph Hellwigeef99382009-08-20 17:43:41 +02001885 ssize_t ret;
Al Viro5f380c72015-04-07 11:28:12 -04001886
Al Viro7ec7b942015-04-07 11:35:14 -04001887 if (bdev_read_only(I_BDEV(bd_inode)))
1888 return -EPERM;
Al Viro5f380c72015-04-07 11:28:12 -04001889
Christoph Hellwigbb3247a392020-09-21 09:19:55 +02001890 if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
Darrick J. Wongdc617f22019-08-20 07:55:16 -07001891 return -ETXTBSY;
1892
Al Viro7ec7b942015-04-07 11:35:14 -04001893 if (!iov_iter_count(from))
Al Viro5f380c72015-04-07 11:28:12 -04001894 return 0;
1895
Al Viro7ec7b942015-04-07 11:35:14 -04001896 if (iocb->ki_pos >= size)
1897 return -ENOSPC;
1898
Christoph Hellwigc35fc7a2017-08-29 16:13:21 +02001899 if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
1900 return -EOPNOTSUPP;
1901
Al Viro7ec7b942015-04-07 11:35:14 -04001902 iov_iter_truncate(from, size - iocb->ki_pos);
Christoph Hellwigeef99382009-08-20 17:43:41 +02001903
Jianpeng Ma53362a02012-08-02 09:50:39 +02001904 blk_start_plug(&plug);
Al Viro1456c0a2014-04-03 03:21:50 -04001905 ret = __generic_file_write_iter(iocb, from);
Christoph Hellwige2592212016-04-07 08:52:01 -07001906 if (ret > 0)
1907 ret = generic_write_sync(iocb, ret);
Jianpeng Ma53362a02012-08-02 09:50:39 +02001908 blk_finish_plug(&plug);
Christoph Hellwigeef99382009-08-20 17:43:41 +02001909 return ret;
1910}
Al Viro1456c0a2014-04-03 03:21:50 -04001911EXPORT_SYMBOL_GPL(blkdev_write_iter);
Christoph Hellwigeef99382009-08-20 17:43:41 +02001912
David Jefferyb2de5252014-09-29 10:21:10 -04001913ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001914{
1915 struct file *file = iocb->ki_filp;
Dan Williams4ebb16c2015-10-28 07:48:19 +09001916 struct inode *bd_inode = bdev_file_inode(file);
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001917 loff_t size = i_size_read(bd_inode);
Al Viroa8860382014-04-02 20:02:21 -04001918 loff_t pos = iocb->ki_pos;
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001919
1920 if (pos >= size)
1921 return 0;
1922
1923 size -= pos;
Al Viroa8860382014-04-02 20:02:21 -04001924 iov_iter_truncate(to, size);
1925 return generic_file_read_iter(iocb, to);
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001926}
David Jefferyb2de5252014-09-29 10:21:10 -04001927EXPORT_SYMBOL_GPL(blkdev_read_iter);
Linus Torvalds684c9aa2012-12-07 16:48:39 -08001928
Christoph Hellwigeef99382009-08-20 17:43:41 +02001929/*
Theodore Ts'o87d8fe12009-01-03 09:47:09 -05001930 * Try to release a page associated with block device when the system
1931 * is under memory pressure.
1932 */
1933static int blkdev_releasepage(struct page *page, gfp_t wait)
1934{
1935 struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;
1936
1937 if (super && super->s_op->bdev_try_to_free_page)
1938 return super->s_op->bdev_try_to_free_page(super, page, wait);
1939
1940 return try_to_free_buffers(page);
1941}
1942
Ross Zwisler7f6d5b52016-02-26 15:19:55 -08001943static int blkdev_writepages(struct address_space *mapping,
1944 struct writeback_control *wbc)
1945{
Ross Zwisler7f6d5b52016-02-26 15:19:55 -08001946 return generic_writepages(mapping, wbc);
1947}
1948
Adrian Bunk4c54ac62008-02-18 13:48:31 +01001949static const struct address_space_operations def_blk_aops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 .readpage = blkdev_readpage,
Matthew Wilcox (Oracle)d4388342020-06-01 21:47:02 -07001951 .readahead = blkdev_readahead,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 .writepage = blkdev_writepage,
Nick Piggin6272b5a2007-10-16 01:25:04 -07001953 .write_begin = blkdev_write_begin,
1954 .write_end = blkdev_write_end,
Ross Zwisler7f6d5b52016-02-26 15:19:55 -08001955 .writepages = blkdev_writepages,
Theodore Ts'o87d8fe12009-01-03 09:47:09 -05001956 .releasepage = blkdev_releasepage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 .direct_IO = blkdev_direct_IO,
Jan Kara88dbcbb2018-12-28 00:39:16 -08001958 .migratepage = buffer_migrate_page_norefs,
Mel Gormanb4597222013-07-03 15:02:05 -07001959 .is_dirty_writeback = buffer_check_dirty_writeback,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960};
1961
Darrick J. Wong25f4c412016-10-11 13:51:11 -07001962#define BLKDEV_FALLOC_FL_SUPPORTED \
1963 (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
1964 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)
1965
1966static long blkdev_fallocate(struct file *file, int mode, loff_t start,
1967 loff_t len)
1968{
1969 struct block_device *bdev = I_BDEV(bdev_file_inode(file));
Darrick J. Wong25f4c412016-10-11 13:51:11 -07001970 loff_t end = start + len - 1;
1971 loff_t isize;
1972 int error;
1973
1974 /* Fail if we don't recognize the flags. */
1975 if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
1976 return -EOPNOTSUPP;
1977
1978 /* Don't go off the end of the device. */
1979 isize = i_size_read(bdev->bd_inode);
1980 if (start >= isize)
1981 return -EINVAL;
1982 if (end >= isize) {
1983 if (mode & FALLOC_FL_KEEP_SIZE) {
1984 len = isize - start;
1985 end = start + len - 1;
1986 } else
1987 return -EINVAL;
1988 }
1989
1990 /*
1991 * Don't allow IO that isn't aligned to logical block size.
1992 */
1993 if ((start | len) & (bdev_logical_block_size(bdev) - 1))
1994 return -EINVAL;
1995
1996 /* Invalidate the page cache, including dirty pages. */
Jan Kara384d87e2020-09-04 10:58:52 +02001997 error = truncate_bdev_range(bdev, file->f_mode, start, end);
1998 if (error)
1999 return error;
Darrick J. Wong25f4c412016-10-11 13:51:11 -07002000
2001 switch (mode) {
2002 case FALLOC_FL_ZERO_RANGE:
2003 case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
2004 error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
Christoph Hellwigee472d82017-04-05 19:21:08 +02002005 GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
Darrick J. Wong25f4c412016-10-11 13:51:11 -07002006 break;
2007 case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
Christoph Hellwig34045122017-04-05 19:21:11 +02002008 error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
2009 GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
Darrick J. Wong25f4c412016-10-11 13:51:11 -07002010 break;
2011 case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
Darrick J. Wong25f4c412016-10-11 13:51:11 -07002012 error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
2013 GFP_KERNEL, 0);
2014 break;
2015 default:
2016 return -EOPNOTSUPP;
2017 }
2018 if (error)
2019 return error;
2020
2021 /*
2022 * Invalidate again; if someone wandered in and dirtied a page,
2023 * the caller will be given -EBUSY. The third argument is
2024 * inclusive, so the rounding here is safe.
2025 */
Jan Kara384d87e2020-09-04 10:58:52 +02002026 return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
Darrick J. Wong25f4c412016-10-11 13:51:11 -07002027 start >> PAGE_SHIFT,
2028 end >> PAGE_SHIFT);
2029}
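
/*
 * Illustrative sketch (hypothetical userspace program): the mode
 * combinations handled above correspond to fallocate(2) calls on a
 * block device node, e.g. punching a hole to discard the first MiB:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int fd = open("/dev/vdb", O_RDWR);
 *	int err = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			    0, 1 << 20);
 *
 * Offset and length must be aligned to the logical block size,
 * otherwise -EINVAL is returned as checked above.
 */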
2030
Arjan van de Ven4b6f5d22006-03-28 01:56:42 -08002031const struct file_operations def_blk_fops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 .open = blkdev_open,
2033 .release = blkdev_close,
2034 .llseek = block_llseek,
Al Viroa8860382014-04-02 20:02:21 -04002035 .read_iter = blkdev_read_iter,
Al Viro1456c0a2014-04-03 03:21:50 -04002036 .write_iter = blkdev_write_iter,
Christoph Hellwigeae83ce2018-11-30 08:31:52 -07002037 .iopoll = blkdev_iopoll,
Dan Williamsacc93d32016-05-07 11:40:28 -07002038 .mmap = generic_file_mmap,
Andrew Mortonb1dd3b22010-04-06 14:35:00 -07002039 .fsync = blkdev_fsync,
Arnd Bergmannbb93e3a2005-06-23 00:10:15 -07002040 .unlocked_ioctl = block_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041#ifdef CONFIG_COMPAT
2042 .compat_ioctl = compat_blkdev_ioctl,
2043#endif
Linus Torvalds1e8b3332012-11-29 10:49:50 -08002044 .splice_read = generic_file_splice_read,
Al Viro8d020762014-04-05 04:27:08 -04002045 .splice_write = iter_file_splice_write,
Darrick J. Wong25f4c412016-10-11 13:51:11 -07002046 .fallocate = blkdev_fallocate,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047};
2048
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049/**
2050 * lookup_bdev - lookup a struct block_device by name
Randy Dunlap94e29592009-01-06 14:41:15 -08002051 * @pathname: special file representing the block device
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 *
Randy Dunlap57d1b532008-10-09 10:42:38 +02002053 * Get a reference to the blockdevice at @pathname in the current
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 * namespace if possible and return it. Return ERR_PTR(error)
2055 * otherwise.
2056 */
Al Viro421748e2008-08-02 01:04:36 -04002057struct block_device *lookup_bdev(const char *pathname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058{
2059 struct block_device *bdev;
2060 struct inode *inode;
Al Viro421748e2008-08-02 01:04:36 -04002061 struct path path;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 int error;
2063
Al Viro421748e2008-08-02 01:04:36 -04002064 if (!pathname || !*pathname)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 return ERR_PTR(-EINVAL);
2066
Al Viro421748e2008-08-02 01:04:36 -04002067 error = kern_path(pathname, LOOKUP_FOLLOW, &path);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 if (error)
2069 return ERR_PTR(error);
2070
David Howellsbb6687342015-03-17 22:26:21 +00002071 inode = d_backing_inode(path.dentry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 error = -ENOTBLK;
2073 if (!S_ISBLK(inode->i_mode))
2074 goto fail;
2075 error = -EACCES;
Eric W. Biedermana2982cc2016-06-09 15:34:02 -05002076 if (!may_open_dev(&path))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 goto fail;
2078 error = -ENOMEM;
2079 bdev = bd_acquire(inode);
2080 if (!bdev)
2081 goto fail;
2082out:
Al Viro421748e2008-08-02 01:04:36 -04002083 path_put(&path);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 return bdev;
2085fail:
2086 bdev = ERR_PTR(error);
2087 goto out;
2088}
Al Virod5686b42008-08-01 05:00:11 -04002089EXPORT_SYMBOL(lookup_bdev);
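
/*
 * Illustrative sketch (hypothetical code): lookup_bdev() only
 * translates a path into a referenced struct block_device; it does not
 * open the device.  The reference is dropped with bdput():
 *
 *	struct block_device *bdev = lookup_bdev("/dev/vda");
 *
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	bdput(bdev);
 */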
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090
NeilBrown93b270f2011-02-24 17:25:47 +11002091int __invalidate_device(struct block_device *bdev, bool kill_dirty)
David Howellsb71e8a42006-08-29 19:06:11 +01002092{
2093 struct super_block *sb = get_super(bdev);
2094 int res = 0;
2095
2096 if (sb) {
2097 /*
2098 * no need to lock the super, get_super holds the
2099 * read mutex so the filesystem cannot go away
2100 * under us (->put_super runs with the write lock
2101 * held).
2102 */
2103 shrink_dcache_sb(sb);
NeilBrown93b270f2011-02-24 17:25:47 +11002104 res = invalidate_inodes(sb, kill_dirty);
David Howellsb71e8a42006-08-29 19:06:11 +01002105 drop_super(sb);
2106 }
Peter Zijlstraf98393a2007-05-06 14:49:54 -07002107 invalidate_bdev(bdev);
David Howellsb71e8a42006-08-29 19:06:11 +01002108 return res;
2109}
2110EXPORT_SYMBOL(__invalidate_device);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002111
2112void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
2113{
2114 struct inode *inode, *old_inode = NULL;
2115
Dave Chinner74278da2015-03-04 12:37:22 -05002116 spin_lock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002117 list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
2118 struct address_space *mapping = inode->i_mapping;
Rabin Vincentaf309222016-12-01 09:18:28 +01002119 struct block_device *bdev;
Jan Kara5c0d6b62012-07-03 16:45:31 +02002120
2121 spin_lock(&inode->i_lock);
2122 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
2123 mapping->nrpages == 0) {
2124 spin_unlock(&inode->i_lock);
2125 continue;
2126 }
2127 __iget(inode);
2128 spin_unlock(&inode->i_lock);
Dave Chinner74278da2015-03-04 12:37:22 -05002129 spin_unlock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002130 /*
2131 * We hold a reference to 'inode' so it couldn't have been
2132 * removed from s_inodes list while we dropped the
Dave Chinner74278da2015-03-04 12:37:22 -05002133 * s_inode_list_lock. We cannot iput the inode now as we can
Jan Kara5c0d6b62012-07-03 16:45:31 +02002134 * be holding the last reference and we cannot iput it under
Dave Chinner74278da2015-03-04 12:37:22 -05002135 * s_inode_list_lock. So we keep the reference and iput it
Jan Kara5c0d6b62012-07-03 16:45:31 +02002136 * later.
2137 */
2138 iput(old_inode);
2139 old_inode = inode;
Rabin Vincentaf309222016-12-01 09:18:28 +01002140 bdev = I_BDEV(inode);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002141
Rabin Vincentaf309222016-12-01 09:18:28 +01002142 mutex_lock(&bdev->bd_mutex);
2143 if (bdev->bd_openers)
2144 func(bdev, arg);
2145 mutex_unlock(&bdev->bd_mutex);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002146
Dave Chinner74278da2015-03-04 12:37:22 -05002147 spin_lock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002148 }
Dave Chinner74278da2015-03-04 12:37:22 -05002149 spin_unlock(&blockdev_superblock->s_inode_list_lock);
Jan Kara5c0d6b62012-07-03 16:45:31 +02002150 iput(old_inode);
2151}
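
/*
 * Illustrative sketch (hypothetical callback, modelled on the sync
 * path): iterate_bdevs() invokes @func under bd_mutex for every block
 * device that is currently open:
 *
 *	static void example_flush_one_bdev(struct block_device *bdev,
 *					   void *arg)
 *	{
 *		filemap_fdatawrite(bdev->bd_inode->i_mapping);
 *	}
 *
 *	iterate_bdevs(example_flush_one_bdev, NULL);
 */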