// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include "internal.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static const struct address_space_operations def_blk_aops;

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

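/*
 * Flush a dirty bdev inode to disk. Writeback can redirty the inode while
 * write_inode_now() sleeps, so I_DIRTY is re-checked under i_lock and the
 * loop repeats until the inode is observed clean.
 */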
static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
					    "for block device %s (err=%d).\n",
					    bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious.
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for a given bdev range. This function bails
 * out with an error if the bdev has another exclusive owner (such as a
 * filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
			loff_t lstart, loff_t lend)
{
	struct block_device *claimed_bdev = NULL;
	int err;

	/*
	 * If we don't hold an exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under a live filesystem.
	 */
	if (!(mode & FMODE_EXCL)) {
		claimed_bdev = bdev->bd_contains;
		err = bd_prepare_to_claim(bdev, claimed_bdev,
					  truncate_bdev_range);
		if (err)
			goto invalidate;
	}
	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (claimed_bdev)
		bd_abort_claiming(bdev, claimed_bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}
EXPORT_SYMBOL(truncate_bdev_range);

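/*
 * Pick the largest power-of-two block size, up to PAGE_SIZE, that evenly
 * divides the device capacity. E.g. for a 7 KiB device the loop stops at
 * 1 KiB (bit 10 is set in the size), while a 1 MiB device gets the full
 * PAGE_SIZE block size.
 */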
static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is same as current */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
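
/*
 * Illustrative usage (a sketch, not code from this file): a filesystem
 * typically picks its block size at mount time, e.g.
 *
 *	sb->s_blocksize = sb_min_blocksize(sb, 1024);
 *	if (!sb->s_blocksize)
 *		return -EINVAL;
 *
 * sb_min_blocksize() rounds the request up to the device's logical block
 * size, so the returned value may be larger than the one asked for; it
 * returns 0 when set_blocksize() rejects the resulting size.
 */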

static int
blkdev_get_block(struct inode *inode, sector_t iblock,
		struct buffer_head *bh, int create)
{
	bh->b_bdev = I_BDEV(inode);
	bh->b_blocknr = iblock;
	set_buffer_mapped(bh);
	return 0;
}

static struct inode *bdev_file_inode(struct file *file)
{
	return file->f_mapping->host;
}

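/*
 * For IOCB_DSYNC writes, REQ_FUA makes each write durable when it
 * completes, so no separate cache flush has to be issued from an I/O
 * completion work item. The block layer emulates FUA with a post-flush
 * on devices that do not support it natively.
 */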
static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
	unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	/* avoid the need for an I/O completion work item */
	if (iocb->ki_flags & IOCB_DSYNC)
		op |= REQ_FUA;
	return op;
}

#define DIO_INLINE_BIO_VECS 4

static void blkdev_bio_end_io_simple(struct bio *bio)
{
	struct task_struct *waiter = bio->bi_private;

	WRITE_ONCE(bio->bi_private, NULL);
	blk_wake_io_task(waiter);
}

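/*
 * Fast path for small synchronous direct I/O: the bio and (for requests up
 * to DIO_INLINE_BIO_VECS segments) its bio_vec array live on the stack, no
 * blkdev_dio is allocated, and the submitter sleeps or polls in place until
 * blkdev_bio_end_io_simple() clears bio.bi_private.
 */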
static ssize_t
__blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
		int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
	loff_t pos = iocb->ki_pos;
	bool should_dirty = false;
	struct bio bio;
	ssize_t ret;
	blk_qc_t qc;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	if (nr_pages <= DIO_INLINE_BIO_VECS)
		vecs = inline_vecs;
	else {
		vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
				     GFP_KERNEL);
		if (!vecs)
			return -ENOMEM;
	}

	bio_init(&bio, vecs, nr_pages);
	bio_set_dev(&bio, bdev);
	bio.bi_iter.bi_sector = pos >> 9;
	bio.bi_write_hint = iocb->ki_hint;
	bio.bi_private = current;
	bio.bi_end_io = blkdev_bio_end_io_simple;
	bio.bi_ioprio = iocb->ki_ioprio;

	ret = bio_iov_iter_get_pages(&bio, iter);
	if (unlikely(ret))
		goto out;
	ret = bio.bi_iter.bi_size;

	if (iov_iter_rw(iter) == READ) {
		bio.bi_opf = REQ_OP_READ;
		if (iter_is_iovec(iter))
			should_dirty = true;
	} else {
		bio.bi_opf = dio_bio_write_op(iocb);
		task_io_account_write(ret);
	}
	if (iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(&bio, iocb);

	qc = submit_bio(&bio);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio.bi_private))
			break;
		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	bio_release_pages(&bio, should_dirty);
	if (unlikely(bio.bi_status))
		ret = blk_status_to_errno(bio.bi_status);

out:
	if (vecs != inline_vecs)
		kfree(vecs);

	bio_uninit(&bio);

	return ret;
}

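/*
 * Per-request state for the multi-bio direct I/O path. The union reflects
 * the two completion models: synchronous callers park in 'waiter' and are
 * woken directly, while async callers have their 'iocb' completed from the
 * end_io handler. The structure is embedded in the first bio of the request
 * (allocated from blkdev_dio_pool), and 'ref' counts outstanding bios once
 * the request spans more than one.
 */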
struct blkdev_dio {
	union {
		struct kiocb		*iocb;
		struct task_struct	*waiter;
	};
	size_t			size;
	atomic_t		ref;
	bool			multi_bio : 1;
	bool			should_dirty : 1;
	bool			is_sync : 1;
	struct bio		bio;
};

static struct bio_set blkdev_dio_pool;

static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
{
	struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
	struct request_queue *q = bdev_get_queue(bdev);

	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
}

static void blkdev_bio_end_io(struct bio *bio)
{
	struct blkdev_dio *dio = bio->bi_private;
	bool should_dirty = dio->should_dirty;

	if (bio->bi_status && !dio->bio.bi_status)
		dio->bio.bi_status = bio->bi_status;

	if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
		if (!dio->is_sync) {
			struct kiocb *iocb = dio->iocb;
			ssize_t ret;

			if (likely(!dio->bio.bi_status)) {
				ret = dio->size;
				iocb->ki_pos += ret;
			} else {
				ret = blk_status_to_errno(dio->bio.bi_status);
			}

			dio->iocb->ki_complete(iocb, ret, 0);
			if (dio->multi_bio)
				bio_put(&dio->bio);
		} else {
			struct task_struct *waiter = dio->waiter;

			WRITE_ONCE(dio->waiter, NULL);
			blk_wake_io_task(waiter);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

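/*
 * General direct I/O path: the request is split into as many bios as needed
 * (each capped at BIO_MAX_PAGES segments), all chained to one blkdev_dio.
 * Submissions are plugged except for polled I/O, which must reach the
 * driver immediately so that blk_poll() can find it.
 */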
static ssize_t
__blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = bdev_file_inode(file);
	struct block_device *bdev = I_BDEV(inode);
	struct blk_plug plug;
	struct blkdev_dio *dio;
	struct bio *bio;
	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
	loff_t pos = iocb->ki_pos;
	blk_qc_t qc = BLK_QC_T_NONE;
	int ret = 0;

	if ((pos | iov_iter_alignment(iter)) &
	    (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);

	dio = container_of(bio, struct blkdev_dio, bio);
	dio->is_sync = is_sync = is_sync_kiocb(iocb);
	if (dio->is_sync) {
		dio->waiter = current;
		bio_get(bio);
	} else {
		dio->iocb = iocb;
	}

	dio->size = 0;
	dio->multi_bio = false;
	dio->should_dirty = is_read && iter_is_iovec(iter);

	/*
	 * Don't plug for HIPRI/polled IO, as those should go straight
	 * to issue
	 */
	if (!is_poll)
		blk_start_plug(&plug);

	for (;;) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = pos >> 9;
		bio->bi_write_hint = iocb->ki_hint;
		bio->bi_private = dio;
		bio->bi_end_io = blkdev_bio_end_io;
		bio->bi_ioprio = iocb->ki_ioprio;

		ret = bio_iov_iter_get_pages(bio, iter);
		if (unlikely(ret)) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			break;
		}

		if (is_read) {
			bio->bi_opf = REQ_OP_READ;
			if (dio->should_dirty)
				bio_set_pages_dirty(bio);
		} else {
			bio->bi_opf = dio_bio_write_op(iocb);
			task_io_account_write(bio->bi_iter.bi_size);
		}

		dio->size += bio->bi_iter.bi_size;
		pos += bio->bi_iter.bi_size;

		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
		if (!nr_pages) {
			bool polled = false;

			if (iocb->ki_flags & IOCB_HIPRI) {
				bio_set_polled(bio, iocb);
				polled = true;
			}

			qc = submit_bio(bio);

			if (polled)
				WRITE_ONCE(iocb->ki_cookie, qc);
			break;
		}

		if (!dio->multi_bio) {
			/*
			 * AIO needs an extra reference to ensure the dio
			 * structure which is embedded into the first bio
			 * stays around.
			 */
			if (!is_sync)
				bio_get(bio);
			dio->multi_bio = true;
			atomic_set(&dio->ref, 2);
		} else {
			atomic_inc(&dio->ref);
		}

		submit_bio(bio);
		bio = bio_alloc(GFP_KERNEL, nr_pages);
	}

	if (!is_poll)
		blk_finish_plug(&plug);

	if (!is_sync)
		return -EIOCBQUEUED;

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(dio->waiter))
			break;

		if (!(iocb->ki_flags & IOCB_HIPRI) ||
		    !blk_poll(bdev_get_queue(bdev), qc, true))
			blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!ret)
		ret = blk_status_to_errno(dio->bio.bi_status);
	if (likely(!ret))
		ret = dio->size;

	bio_put(&dio->bio);
	return ret;
}

static ssize_t
blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	int nr_pages;

	nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES + 1);
	if (!nr_pages)
		return 0;
	if (is_sync_kiocb(iocb) && nr_pages <= BIO_MAX_PAGES)
		return __blkdev_direct_IO_simple(iocb, iter, nr_pages);

	return __blkdev_direct_IO(iocb, iter, min(nr_pages, BIO_MAX_PAGES));
}

static __init int blkdev_init(void)
{
	return bioset_init(&blkdev_dio_pool, 4, offsetof(struct blkdev_dio, bio), BIOSET_NEED_BVECS);
}
module_init(blkdev_init);

521{
522 if (!bdev)
523 return 0;
524 if (!wait)
525 return filemap_flush(bdev->bd_inode->i_mapping);
526 return filemap_write_and_wait(bdev->bd_inode->i_mapping);
527}
528
Nick Piggin585d3bc2009-02-25 10:44:19 +0100529/*
530 * Write out and wait upon all the dirty data associated with a block
531 * device via its mapping. Does not take the superblock lock.
532 */
533int sync_blockdev(struct block_device *bdev)
534{
Jan Kara5cee5812009-04-27 16:43:51 +0200535 return __sync_blockdev(bdev, 1);
Nick Piggin585d3bc2009-02-25 10:44:19 +0100536}
537EXPORT_SYMBOL(sync_blockdev);
538
539/*
540 * Write out and wait upon all dirty data associated with this
541 * device. Filesystem data as well as the underlying block
542 * device. Takes the superblock lock.
543 */
544int fsync_bdev(struct block_device *bdev)
545{
546 struct super_block *sb = get_super(bdev);
547 if (sb) {
Jan Kara60b06802009-04-27 16:43:53 +0200548 int res = sync_filesystem(sb);
Nick Piggin585d3bc2009-02-25 10:44:19 +0100549 drop_super(sb);
550 return res;
551 }
552 return sync_blockdev(bdev);
553}
Al Viro47e44912009-04-01 07:07:16 -0400554EXPORT_SYMBOL(fsync_bdev);
Nick Piggin585d3bc2009-02-25 10:44:19 +0100555
/**
 * freeze_bdev  --  lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when
 * multiple freeze requests arrive simultaneously. It counts up in
 * freeze_bdev() and counts down in thaw_bdev(). When it becomes 0,
 * thaw_bdev() will actually unfreeze the filesystem.
 */
struct super_block *freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1) {
		/*
		 * We don't even need to grab a reference - the first call
		 * to freeze_bdev grabs an active reference and only the last
		 * thaw_bdev drops it.
		 */
		sb = get_super(bdev);
		if (sb)
			drop_super(sb);
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return sb;
	}

	sb = get_active_super(bdev);
	if (!sb)
		goto out;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	if (error) {
		deactivate_super(sb);
		bdev->bd_fsfreeze_count--;
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return ERR_PTR(error);
	}
	deactivate_super(sb);
 out:
	sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return sb;	/* thaw_bdev releases s->s_umount */
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev  -- unlock filesystem
 * @bdev:	blockdevice to unlock
 * @sb:		associated superblock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
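
/*
 * Illustrative pairing (a sketch, not code from this file), as used by
 * snapshotting code:
 *
 *	sb = freeze_bdev(bdev);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	take_snapshot(bdev);
 *	thaw_bdev(bdev, sb);
 *
 * take_snapshot() is a placeholder for the caller's own logic; sb may be
 * NULL when no filesystem was mounted, and thaw_bdev() accepts that.
 */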

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file * file, struct page * page)
{
	return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
	mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return block_write_begin(mapping, pos, len, flags, pagep,
				 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	int ret;
	ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	unlock_page(page);
	put_page(page);

	return ret;
}

/*
 * private llseek:
 * for a block special file file_inode(file)->i_size is zero
 * so we compute the size by hand (just as in block_read/write above)
 */
static loff_t block_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t retval;

	inode_lock(bd_inode);
	retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
	inode_unlock(bd_inode);
	return retval;
}

int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *bd_inode = bdev_file_inode(filp);
	struct block_device *bdev = I_BDEV(bd_inode);
	int error;

	error = file_write_and_wait_range(filp, start, end);
	if (error)
		return error;

	/*
	 * There is no need to serialise calls to blkdev_issue_flush with
	 * i_mutex and doing so causes performance issues with concurrent
	 * O_SYNC writers to a block device.
	 */
	error = blkdev_issue_flush(bdev, GFP_KERNEL);
	if (error == -EOPNOTSUPP)
		error = 0;

	return error;
}
EXPORT_SYMBOL(blkdev_fsync);

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page to (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
			struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return result;

	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_READ);
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
			struct page *page, struct writeback_control *wbc)
{
	int result;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_WRITE);
	if (result) {
		end_page_writeback(page);
	} else {
		clean_page_buffers(page);
		unlock_page(page);
	}
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}

/*
 * pseudo-fs
 */
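
/*
 * Every block device is represented by a bdev_inode on this internal,
 * never-mounted "bdev" superblock. The inode's i_mapping is the device's
 * page cache; device nodes in /dev alias it via bd_acquire() below, so all
 * openers of one device share a single cache.
 */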

static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache * bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *foo)
{
	struct bdev_inode *ei = (struct bdev_inode *) foo;
	struct block_device *bdev = &ei->bdev;

	memset(bdev, 0, sizeof(*bdev));
	mutex_init(&bdev->bd_mutex);
#ifdef CONFIG_SYSFS
	INIT_LIST_HEAD(&bdev->bd_holder_disks);
#endif
	bdev->bd_bdi = &noop_backing_dev_info;
	inode_init_once(&ei->vfs_inode);
	/* Initialize mutex for freeze. */
	mutex_init(&bdev->bd_fsfreeze_mutex);
}

static void bdev_evict_inode(struct inode *inode)
{
	struct block_device *bdev = &BDEV_I(inode)->bdev;
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
	/* Detach inode from wb early as bdi_put() may free bdi->wb */
	inode_detach_wb(inode);
	if (bdev->bd_bdi != &noop_backing_dev_info) {
		bdi_put(bdev->bd_bdi);
		bdev->bd_bdi = &noop_backing_dev_info;
	}
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name = "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb = kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

/*
 * Most likely _very_ bad one - but then it's hardly critical for small
 * /dev and can be fixed when somebody will need really large one.
 * Keep in mind that it will be fed through icache hash function too.
 */
static inline unsigned long hash(dev_t dev)
{
	return MAJOR(dev)+MINOR(dev);
}

static int bdev_test(struct inode *inode, void *data)
{
	return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}

static int bdev_set(struct inode *inode, void *data)
{
	BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
	return 0;
}

static struct block_device *bdget(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = iget5_locked(blockdev_superblock, hash(dev),
			bdev_test, bdev_set, &dev);

	if (!inode)
		return NULL;

	bdev = &BDEV_I(inode)->bdev;

	if (inode->i_state & I_NEW) {
		spin_lock_init(&bdev->bd_size_lock);
		bdev->bd_contains = NULL;
		bdev->bd_super = NULL;
		bdev->bd_inode = inode;
		bdev->bd_part_count = 0;
		inode->i_mode = S_IFBLK;
		inode->i_rdev = dev;
		inode->i_bdev = bdev;
		inode->i_data.a_ops = &def_blk_aops;
		mapping_set_gfp_mask(&inode->i_data, GFP_USER);
		unlock_new_inode(inode);
	}
	return bdev;
}

/**
 * bdgrab -- Grab a reference to an already referenced block device
 * @bdev:	Block device to grab a reference to.
 */
struct block_device *bdgrab(struct block_device *bdev)
{
	ihold(bdev->bd_inode);
	return bdev;
}
EXPORT_SYMBOL(bdgrab);

struct block_device *bdget_part(struct hd_struct *part)
{
	return bdget(part_devt(part));
}

long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

void bdput(struct block_device *bdev)
{
	iput(bdev->bd_inode);
}

EXPORT_SYMBOL(bdput);

static struct block_device *bd_acquire(struct inode *inode)
{
	struct block_device *bdev;

	spin_lock(&bdev_lock);
	bdev = inode->i_bdev;
	if (bdev && !inode_unhashed(bdev->bd_inode)) {
		bdgrab(bdev);
		spin_unlock(&bdev_lock);
		return bdev;
	}
	spin_unlock(&bdev_lock);

	/*
	 * i_bdev references block device inode that was already shut down
	 * (corresponding device got removed). Remove the reference and look
	 * up block device inode again just in case new device got
	 * reestablished under the same device number.
	 */
	if (bdev)
		bd_forget(inode);

	bdev = bdget(inode->i_rdev);
	if (bdev) {
		spin_lock(&bdev_lock);
		if (!inode->i_bdev) {
			/*
			 * We take an additional reference to bd_inode,
			 * and it's released in clear_inode() of inode.
			 * So, we can access it via ->i_mapping always
			 * without igrab().
			 */
			bdgrab(bdev);
			inode->i_bdev = bdev;
			inode->i_mapping = bdev->bd_inode->i_mapping;
		}
		spin_unlock(&bdev_lock);
	}
	return bdev;
}

/* Call when you free inode */

void bd_forget(struct inode *inode)
{
	struct block_device *bdev = NULL;

	spin_lock(&bdev_lock);
	if (!sb_is_blkdev_sb(inode->i_sb))
		bdev = inode->i_bdev;
	inode->i_bdev = NULL;
	inode->i_mapping = &inode->i_data;
	spin_unlock(&bdev_lock);

	if (bdev)
		bdput(bdev);
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (whole == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @whole: the whole device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
		void *holder)
{
retry:
	spin_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder)) {
		spin_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	spin_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */
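
/*
 * The claiming protocol in brief: bd_prepare_to_claim() stakes a claim by
 * setting ->bd_claiming on the whole device, serialising concurrent
 * claimers; the caller then opens the device and either promotes the claim
 * with bd_finish_claiming() (setting ->bd_holder) or drops it with
 * bd_abort_claiming(). Waiters blocked in bd_prepare_to_claim() are woken
 * in both cases.
 */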

static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
{
	struct gendisk *disk = get_gendisk(bdev->bd_dev, partno);

	if (!disk)
		return NULL;
	/*
	 * Now that we hold a gendisk reference we make sure the bdev we looked
	 * up is not stale. If it is, it means the device got removed and
	 * created again before we looked up the gendisk, and we fail the open
	 * in that case. Associating an unhashed bdev with a newly created
	 * gendisk could lead to two bdevs (and thus two independent caches)
	 * being associated with one device, which is bad.
	 */
	if (inode_unhashed(bdev->bd_inode)) {
		put_disk_and_module(disk);
		return NULL;
	}
	return disk;
}

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @whole: whole block device
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev,
		struct block_device *whole, void *holder)
{
	spin_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, whole, holder));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	bdev->bd_holder = holder;
	bd_clear_claiming(whole, holder);
	spin_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @whole: whole block device
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can
 * also be used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
		       void *holder)
{
	spin_lock(&bdev_lock);
	bd_clear_claiming(whole, holder);
	spin_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);
Tejun Heo6b4517a2010-04-07 18:53:59 +09001183
Jun'ichi Nomura641dc632006-03-27 01:17:57 -08001184#ifdef CONFIG_SYSFS
Tejun Heo49731ba2011-01-14 18:43:57 +01001185struct bd_holder_disk {
1186 struct list_head list;
1187 struct gendisk *disk;
1188 int refcnt;
1189};
1190
1191static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev,
1192 struct gendisk *disk)
1193{
1194 struct bd_holder_disk *holder;
1195
1196 list_for_each_entry(holder, &bdev->bd_holder_disks, list)
1197 if (holder->disk == disk)
1198 return holder;
1199 return NULL;
1200}
1201
Andrew Morton4d7dd8fd2006-09-29 01:58:56 -07001202static int add_symlink(struct kobject *from, struct kobject *to)
Jun'ichi Nomura641dc632006-03-27 01:17:57 -08001203{
Andrew Morton4d7dd8fd2006-09-29 01:58:56 -07001204 return sysfs_create_link(from, to, kobject_name(to));
Jun'ichi Nomura641dc632006-03-27 01:17:57 -08001205}
1206
1207static void del_symlink(struct kobject *from, struct kobject *to)
1208{
Jun'ichi Nomura641dc632006-03-27 01:17:57 -08001209 sysfs_remove_link(from, kobject_name(to));
1210}
1211
Jun'ichi Nomura641dc632006-03-27 01:17:57 -08001212/**
Tejun Heoe09b4572010-11-13 11:55:17 +01001213 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
1214 * @bdev: the claimed slave bdev
1215 * @disk: the holding disk
Jun'ichi Nomuradf6c0cd2006-10-30 16:23:56 -05001216 *
Tejun Heo49731ba2011-01-14 18:43:57 +01001217 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
1218 *
Tejun Heoe09b4572010-11-13 11:55:17 +01001219 * This functions creates the following sysfs symlinks.
 *
 * - from "slaves" directory of the holder @disk to the claimed @bdev
 * - from "holders" directory of the @bdev to the holder @disk
 *
 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
 * passed to bd_link_disk_holder(), then:
 *
 *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
 *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
 *
 * The caller must have claimed @bdev before calling this function and
 * ensure that both @bdev and @disk are valid during the creation and
 * lifetime of these symlinks.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;
	int ret = 0;

	mutex_lock(&bdev->bd_mutex);

	WARN_ON_ONCE(!bdev->bd_holder);

	/* FIXME: remove the following once add_disk() handles errors */
	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
		goto out_unlock;

	holder = bd_find_holder_disk(bdev, disk);
	if (holder) {
		holder->refcnt++;
		goto out_unlock;
	}

	holder = kzalloc(sizeof(*holder), GFP_KERNEL);
	if (!holder) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	INIT_LIST_HEAD(&holder->list);
	holder->disk = disk;
	holder->refcnt = 1;

	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
	if (ret)
		goto out_free;

	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
	if (ret)
		goto out_del;
	/*
	 * bdev could be deleted beneath us which would implicitly destroy
	 * the holder directory. Hold on to it.
	 */
	kobject_get(bdev->bd_part->holder_dir);

	list_add(&holder->list, &bdev->bd_holder_disks);
	goto out_unlock;

out_del:
	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
out_free:
	kfree(holder);
out_unlock:
	mutex_unlock(&bdev->bd_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);

/**
 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
 * @bdev: the claimed slave bdev
 * @disk: the holding disk
 *
 * DON'T USE THIS UNLESS YOU'RE ALREADY USING IT.
 *
 * CONTEXT:
 * Might sleep.
 */
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
	struct bd_holder_disk *holder;

	mutex_lock(&bdev->bd_mutex);

	holder = bd_find_holder_disk(bdev, disk);

	if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) {
		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
		del_symlink(bdev->bd_part->holder_dir,
			    &disk_to_dev(disk)->kobj);
		kobject_put(bdev->bd_part->holder_dir);
		list_del_init(&holder->list);
		kfree(holder);
	}

	mutex_unlock(&bdev->bd_mutex);
}
EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
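
/*
 * Example (illustrative sketch modeled on device-mapper): a holder driver
 * that has already claimed the slave bdev pairs the two calls around the
 * device's lifetime. "md" and the surrounding glue are hypothetical; only
 * the two holder calls are from this file.
 *
 *	ret = bd_link_disk_holder(bdev, dm_disk(md));
 *	if (ret)
 *		goto out_put_bdev;
 *	...
 *	bd_unlink_disk_holder(bdev, dm_disk(md));
 */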
#endif

/**
 * check_disk_size_change - checks for disk size change and adjusts bdev size.
 * @disk: struct gendisk to check
 * @bdev: struct bdev to adjust.
 * @verbose: if %true, log a message about a size change if there is any
 *
 * This routine checks to see if the bdev size does not match the disk size
 * and adjusts it if it differs. When shrinking the bdev size, all of its
 * caches are freed.
 */
static void check_disk_size_change(struct gendisk *disk,
		struct block_device *bdev, bool verbose)
{
	loff_t disk_size, bdev_size;

	spin_lock(&bdev->bd_size_lock);
	disk_size = (loff_t)get_capacity(disk) << 9;
	bdev_size = i_size_read(bdev->bd_inode);
	if (disk_size != bdev_size) {
		if (verbose) {
			printk(KERN_INFO
			       "%s: detected capacity change from %lld to %lld\n",
			       disk->disk_name, bdev_size, disk_size);
		}
		i_size_write(bdev->bd_inode, disk_size);
	}
	spin_unlock(&bdev->bd_size_lock);

	if (bdev_size > disk_size) {
		if (__invalidate_device(bdev, false))
			pr_warn("VFS: busy inodes on resized disk %s\n",
				disk->disk_name);
	}
}

/**
 * revalidate_disk_size - checks for disk size change and adjusts bdev size.
 * @disk: struct gendisk to check
 * @verbose: if %true, log a message about a size change if there is any
 *
 * This routine checks to see if the bdev size does not match the disk size
 * and adjusts it if it differs. When shrinking the bdev size, all of its
 * caches are freed.
 */
void revalidate_disk_size(struct gendisk *disk, bool verbose)
{
	struct block_device *bdev;

	/*
	 * Hidden disks don't have associated bdev so there's no point in
	 * revalidating them.
	 */
	if (disk->flags & GENHD_FL_HIDDEN)
		return;

	bdev = bdget_disk(disk, 0);
	if (bdev) {
		check_disk_size_change(disk, bdev, verbose);
		bdput(bdev);
	}
}
EXPORT_SYMBOL(revalidate_disk_size);
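
/*
 * Example (illustrative sketch): a virtual block driver whose backing store
 * grew would publish the new capacity and then resync the bdev size.
 * "disk" and "new_sectors" are hypothetical driver state.
 *
 *	set_capacity(disk, new_sectors);
 *	revalidate_disk_size(disk, true);
 */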

void bd_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
	spin_lock(&bdev->bd_size_lock);
	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
	spin_unlock(&bdev->bd_size_lock);
}
EXPORT_SYMBOL(bd_set_nr_sectors);
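
/*
 * Example (illustrative sketch modeled on the loop driver): when a driver
 * resizes a device it already has open, it updates the gendisk capacity
 * and the bdev inode size together. "lo" is hypothetical driver state.
 *
 *	set_capacity(lo->lo_disk, size);
 *	bd_set_nr_sectors(bdev, size);
 */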

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);

int bdev_disk_changed(struct block_device *bdev, bool invalidate)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	lockdep_assert_held(&bdev->bd_mutex);

	clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);

rescan:
	ret = blk_drop_partitions(bdev);
	if (ret)
		return ret;

	/*
	 * Historically we only set the capacity to zero for devices that
	 * support partitions (independent of actually having partitions
	 * created). Doing that is rather inconsistent, but changing it
	 * broke legacy udisks polling for legacy ide-cdrom devices. Use
	 * the crude check below to get the sane behavior for most devices
	 * while not breaking userspace for this particular setup.
	 */
	if (invalidate) {
		if (disk_part_scan_enabled(disk) ||
		    !(disk->flags & GENHD_FL_REMOVABLE))
			set_capacity(disk, 0);
	} else {
		if (disk->fops->revalidate_disk)
			disk->fops->revalidate_disk(disk);
	}

	check_disk_size_change(disk, bdev, !invalidate);

	if (get_capacity(disk)) {
		ret = blk_add_partitions(disk, bdev);
		if (ret == -EAGAIN)
			goto rescan;
	} else if (invalidate) {
		/*
		 * Tell userspace that the media / partition table may have
		 * changed.
		 */
		kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
	}

	return ret;
}
/*
 * Only exported for the loop and dasd drivers for historic reasons. Don't
 * use in new code!
 */
EXPORT_SYMBOL_GPL(bdev_disk_changed);
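
/*
 * Example (illustrative sketch modeled on the loop driver): rescanning the
 * partition table after the backing store changed, honoring the
 * lockdep_assert_held() above.
 *
 *	mutex_lock(&bdev->bd_mutex);
 *	ret = bdev_disk_changed(bdev, false);
 *	mutex_unlock(&bdev->bd_mutex);
 */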

/*
 * bd_mutex locking:
 *
 *  mutex_lock(part->bd_mutex)
 *    mutex_lock_nested(whole->bd_mutex, 1)
 */

static int __blkdev_get(struct block_device *bdev, fmode_t mode, void *holder,
		int for_part)
{
	struct block_device *whole = NULL, *claiming = NULL;
	struct gendisk *disk;
	int ret;
	int partno;
	bool first_open = false, unblock_events = true, need_restart;

 restart:
	need_restart = false;
	ret = -ENXIO;
	disk = bdev_get_gendisk(bdev, &partno);
	if (!disk)
		goto out;

	if (partno) {
		whole = bdget_disk(disk, 0);
		if (!whole) {
			ret = -ENOMEM;
			goto out_put_disk;
		}
	}

	if (!for_part && (mode & FMODE_EXCL)) {
		WARN_ON_ONCE(!holder);
		if (whole)
			claiming = whole;
		else
			claiming = bdev;
		ret = bd_prepare_to_claim(bdev, claiming, holder);
		if (ret)
			goto out_put_whole;
	}

	disk_block_events(disk);
	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (!bdev->bd_openers) {
		first_open = true;
		bdev->bd_disk = disk;
		bdev->bd_contains = bdev;
		bdev->bd_partno = partno;

		if (!partno) {
			ret = -ENXIO;
			bdev->bd_part = disk_get_part(disk, partno);
			if (!bdev->bd_part)
				goto out_clear;

			ret = 0;
			if (disk->fops->open) {
				ret = disk->fops->open(bdev, mode);
				/*
				 * If we lost a race with 'disk' being deleted,
				 * try again. See md.c
				 */
				if (ret == -ERESTARTSYS)
					need_restart = true;
			}

			if (!ret) {
				bd_set_nr_sectors(bdev, get_capacity(disk));
				set_init_blocksize(bdev);
			}

			/*
			 * If the device is invalidated, rescan partition
			 * if open succeeded or failed with -ENOMEDIUM.
			 * The latter is necessary to prevent ghost
			 * partitions on a removed medium.
			 */
			if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
			    (!ret || ret == -ENOMEDIUM))
				bdev_disk_changed(bdev, ret == -ENOMEDIUM);

			if (ret)
				goto out_clear;
		} else {
			BUG_ON(for_part);
			ret = __blkdev_get(whole, mode, NULL, 1);
			if (ret)
				goto out_clear;
			bdev->bd_contains = bdgrab(whole);
			bdev->bd_part = disk_get_part(disk, partno);
			if (!(disk->flags & GENHD_FL_UP) ||
			    !bdev->bd_part || !bdev->bd_part->nr_sects) {
				ret = -ENXIO;
				goto out_clear;
			}
			bd_set_nr_sectors(bdev, bdev->bd_part->nr_sects);
			set_init_blocksize(bdev);
		}

		if (bdev->bd_bdi == &noop_backing_dev_info)
			bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
	} else {
		if (bdev->bd_contains == bdev) {
			ret = 0;
			if (bdev->bd_disk->fops->open)
				ret = bdev->bd_disk->fops->open(bdev, mode);
			/* the same as first opener case, read comment there */
			if (test_bit(GD_NEED_PART_SCAN, &disk->state) &&
			    (!ret || ret == -ENOMEDIUM))
				bdev_disk_changed(bdev, ret == -ENOMEDIUM);
			if (ret)
				goto out_unlock_bdev;
		}
	}
	bdev->bd_openers++;
	if (for_part)
		bdev->bd_part_count++;
	if (claiming)
		bd_finish_claiming(bdev, claiming, holder);

	/*
	 * Block event polling for write claims if requested. Any write holder
	 * makes the write_holder state stick until all are released. This is
	 * good enough and tracking individual writeable references is too
	 * fragile given the way @mode is used in blkdev_get/put().
	 */
	if (claiming && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
	    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
		bdev->bd_write_holder = true;
		unblock_events = false;
	}
	mutex_unlock(&bdev->bd_mutex);

	if (unblock_events)
		disk_unblock_events(disk);

	/* only one opener holds refs to the module and disk */
	if (!first_open)
		put_disk_and_module(disk);
	if (whole)
		bdput(whole);
	return 0;

 out_clear:
	disk_put_part(bdev->bd_part);
	bdev->bd_disk = NULL;
	bdev->bd_part = NULL;
	if (bdev != bdev->bd_contains)
		__blkdev_put(bdev->bd_contains, mode, 1);
	bdev->bd_contains = NULL;
 out_unlock_bdev:
	if (claiming)
		bd_abort_claiming(bdev, claiming, holder);
	mutex_unlock(&bdev->bd_mutex);
	disk_unblock_events(disk);
 out_put_whole:
	if (whole)
		bdput(whole);
 out_put_disk:
	put_disk_and_module(disk);
	if (need_restart)
		goto restart;
 out:
	return ret;
}

/**
 * blkdev_get - open a block device
 * @bdev: block_device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is
 * opened with exclusive access. Specifying %FMODE_EXCL with %NULL
 * @holder is invalid. Exclusive opens may nest for the same @holder.
 *
 * On success, the reference count of @bdev is unchanged. On failure,
 * @bdev is put.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
{
	int ret, perm = 0;

	if (mode & FMODE_READ)
		perm |= MAY_READ;
	if (mode & FMODE_WRITE)
		perm |= MAY_WRITE;
	ret = devcgroup_inode_permission(bdev->bd_inode, perm);
	if (ret)
		goto bdput;

	ret = __blkdev_get(bdev, mode, holder, 0);
	if (ret)
		goto bdput;
	return 0;

bdput:
	bdput(bdev);
	return ret;
}

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the blockdevice described by the device file at @path. @mode
 * and @holder are identical to blkdev_get().
 *
 * On success, the returned block_device has reference count of one.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	int err;

	bdev = lookup_bdev(path);
	if (IS_ERR(bdev))
		return bdev;

	err = blkdev_get(bdev, mode, holder);
	if (err)
		return ERR_PTR(err);

	if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);
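
/*
 * Example (illustrative sketch): a filesystem opening its backing device
 * exclusively at mount time. The "sb" holder cookie is hypothetical; any
 * stable pointer works, and the same mode must be passed to blkdev_put().
 *
 *	bdev = blkdev_get_by_path("/dev/vda1",
 *			FMODE_READ | FMODE_WRITE | FMODE_EXCL, sb);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */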

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the blockdevice described by device number @dev. @mode and
 * @holder are identical to blkdev_get().
 *
 * Use it ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a
 * device number. _Never_ to be used for internal purposes. If you
 * ever need it - reconsider your API.
 *
 * On success, the returned block_device has reference count of one.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	struct block_device *bdev;
	int err;

	bdev = bdget(dev);
	if (!bdev)
		return ERR_PTR(-ENOMEM);

	err = blkdev_get(bdev, mode, holder);
	if (err)
		return ERR_PTR(err);

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_dev);
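
/*
 * Example (illustrative sketch): opening by device number, e.g. a dev_t
 * parsed from a mount option. MKDEV(8, 0) below is just an illustration
 * (the whole-disk sda node on most setups).
 *
 *	bdev = blkdev_get_by_dev(MKDEV(8, 0), FMODE_READ, NULL);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 */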

static int blkdev_open(struct inode *inode, struct file *filp)
{
	struct block_device *bdev;

	/*
	 * Preserve backwards compatibility and allow large file access
	 * even if userspace doesn't ask for it explicitly. Some mkfs
	 * binaries need it. We might want to drop this workaround
	 * during an unstable branch.
	 */
	filp->f_flags |= O_LARGEFILE;

	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

	if (filp->f_flags & O_NDELAY)
		filp->f_mode |= FMODE_NDELAY;
	if (filp->f_flags & O_EXCL)
		filp->f_mode |= FMODE_EXCL;
	if ((filp->f_flags & O_ACCMODE) == 3)
		filp->f_mode |= FMODE_WRITE_IOCTL;

	bdev = bd_acquire(inode);
	if (bdev == NULL)
		return -ENOMEM;

	filp->f_mapping = bdev->bd_inode->i_mapping;
	filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);

	return blkdev_get(bdev, filp->f_mode, filp);
}

static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
{
	struct gendisk *disk = bdev->bd_disk;
	struct block_device *victim = NULL;

	/*
	 * Sync early if it looks like we're the last one. If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minute)
	 * syncs while holding the mutex.
	 */
	if (bdev->bd_openers == 1)
		sync_blockdev(bdev);

	mutex_lock_nested(&bdev->bd_mutex, for_part);
	if (for_part)
		bdev->bd_part_count--;

	if (!--bdev->bd_openers) {
		WARN_ON_ONCE(bdev->bd_holders);
		sync_blockdev(bdev);
		kill_bdev(bdev);

		bdev_write_inode(bdev);
	}
	if (bdev->bd_contains == bdev) {
		if (disk->fops->release)
			disk->fops->release(disk, mode);
	}
	if (!bdev->bd_openers) {
		disk_put_part(bdev->bd_part);
		bdev->bd_part = NULL;
		bdev->bd_disk = NULL;
		if (bdev != bdev->bd_contains)
			victim = bdev->bd_contains;
		bdev->bd_contains = NULL;

		put_disk_and_module(disk);
	}
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
	if (victim)
		__blkdev_put(victim, mode, 1);
}

void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	mutex_lock(&bdev->bd_mutex);

	if (mode & FMODE_EXCL) {
		bool bdev_free;

		/*
		 * Release a claim on the device. The holder fields
		 * are protected with bdev_lock. bd_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);

		/* bd_contains might point to self, check in a separate step */
		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!bdev->bd_contains->bd_holders)
			bdev->bd_contains->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove holder link and
		 * unblock event polling if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(bdev->bd_disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event. This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE);

	mutex_unlock(&bdev->bd_mutex);

	__blkdev_put(bdev, mode, 0);
}
EXPORT_SYMBOL(blkdev_put);

static int blkdev_close(struct inode *inode, struct file *filp)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(filp));
	blkdev_put(bdev, filp->f_mode);
	return 0;
}

static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	fmode_t mode = file->f_mode;

	/*
	 * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
	 * to update it before every ioctl.
	 */
	if (file->f_flags & O_NDELAY)
		mode |= FMODE_NDELAY;
	else
		mode &= ~FMODE_NDELAY;

	return blkdev_ioctl(bdev, mode, cmd, arg);
}

/*
 * Write data to the block device. Only intended for the block device itself
 * and the raw driver which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	struct blk_plug plug;
	ssize_t ret;

	if (bdev_read_only(I_BDEV(bd_inode)))
		return -EPERM;

	if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
		return -ETXTBSY;

	if (!iov_iter_count(from))
		return 0;

	if (iocb->ki_pos >= size)
		return -ENOSPC;

	if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
		return -EOPNOTSUPP;

	iov_iter_truncate(from, size - iocb->ki_pos);

	blk_start_plug(&plug);
	ret = __generic_file_write_iter(iocb, from);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_write_iter);

ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct inode *bd_inode = bdev_file_inode(file);
	loff_t size = i_size_read(bd_inode);
	loff_t pos = iocb->ki_pos;

	if (pos >= size)
		return 0;

	size -= pos;
	iov_iter_truncate(to, size);
	return generic_file_read_iter(iocb, to);
}
EXPORT_SYMBOL_GPL(blkdev_read_iter);

/*
 * Try to release a page associated with a block device when the system
 * is under memory pressure.
 */
static int blkdev_releasepage(struct page *page, gfp_t wait)
{
	struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super;

	if (super && super->s_op->bdev_try_to_free_page)
		return super->s_op->bdev_try_to_free_page(super, page, wait);

	return try_to_free_buffers(page);
}

static int blkdev_writepages(struct address_space *mapping,
			     struct writeback_control *wbc)
{
	return generic_writepages(mapping, wbc);
}

static const struct address_space_operations def_blk_aops = {
	.readpage	= blkdev_readpage,
	.readahead	= blkdev_readahead,
	.writepage	= blkdev_writepage,
	.write_begin	= blkdev_write_begin,
	.write_end	= blkdev_write_end,
	.writepages	= blkdev_writepages,
	.releasepage	= blkdev_releasepage,
	.direct_IO	= blkdev_direct_IO,
	.migratepage	= buffer_migrate_page_norefs,
	.is_dirty_writeback = buffer_check_dirty_writeback,
};

#define	BLKDEV_FALLOC_FL_SUPPORTED					\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

static long blkdev_fallocate(struct file *file, int mode, loff_t start,
			     loff_t len)
{
	struct block_device *bdev = I_BDEV(bdev_file_inode(file));
	loff_t end = start + len - 1;
	loff_t isize;
	int error;

	/* Fail if we don't recognize the flags. */
	if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	/* Don't go off the end of the device. */
	isize = i_size_read(bdev->bd_inode);
	if (start >= isize)
		return -EINVAL;
	if (end >= isize) {
		if (mode & FALLOC_FL_KEEP_SIZE) {
			len = isize - start;
			end = start + len - 1;
		} else
			return -EINVAL;
	}

	/*
	 * Don't allow IO that isn't aligned to logical block size.
	 */
	if ((start | len) & (bdev_logical_block_size(bdev) - 1))
		return -EINVAL;

	/* Invalidate the page cache, including dirty pages. */
	error = truncate_bdev_range(bdev, file->f_mode, start, end);
	if (error)
		return error;

	switch (mode) {
	case FALLOC_FL_ZERO_RANGE:
	case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
					     GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
		error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
					     GFP_KERNEL, BLKDEV_ZERO_NOFALLBACK);
		break;
	case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
		error = blkdev_issue_discard(bdev, start >> 9, len >> 9,
					     GFP_KERNEL, 0);
		break;
	default:
		return -EOPNOTSUPP;
	}
	if (error)
		return error;

	/*
	 * Invalidate again; if someone wandered in and dirtied a page,
	 * the caller will be given -EBUSY. The third argument is
	 * inclusive, so the rounding here is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     start >> PAGE_SHIFT,
					     end >> PAGE_SHIFT);
}
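
/*
 * Example (illustrative sketch, userspace side): given the mode mapping
 * above, punching a hole zeroes the range (no discard fallback), and
 * adding FALLOC_FL_NO_HIDE_STALE turns it into a plain discard.
 *
 *	int fd = open("/dev/vdb", O_RDWR);
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  0, 1 << 20);
 */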

const struct file_operations def_blk_fops = {
	.open		= blkdev_open,
	.release	= blkdev_close,
	.llseek		= block_llseek,
	.read_iter	= blkdev_read_iter,
	.write_iter	= blkdev_write_iter,
	.iopoll		= blkdev_iopoll,
	.mmap		= generic_file_mmap,
	.fsync		= blkdev_fsync,
	.unlocked_ioctl	= block_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_blkdev_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= blkdev_fallocate,
};

/**
 * lookup_bdev - lookup a struct block_device by name
 * @pathname: special file representing the block device
 *
 * Get a reference to the blockdevice at @pathname in the current
 * namespace if possible and return it. Return ERR_PTR(error)
 * otherwise.
 */
struct block_device *lookup_bdev(const char *pathname)
{
	struct block_device *bdev;
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return ERR_PTR(-EINVAL);

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return ERR_PTR(error);

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto fail;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto fail;
	error = -ENOMEM;
	bdev = bd_acquire(inode);
	if (!bdev)
		goto fail;
out:
	path_put(&path);
	return bdev;
fail:
	bdev = ERR_PTR(error);
	goto out;
}
EXPORT_SYMBOL(lookup_bdev);
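
/*
 * Example (illustrative sketch): resolving a user-supplied path to a bdev
 * without opening it. The reference taken via bd_acquire() is dropped
 * with bdput() once the device number has been read out.
 *
 *	bdev = lookup_bdev("/dev/sda1");
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	dev = bdev->bd_dev;
 *	bdput(bdev);
 */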

int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * No need to lock the super; get_super() holds the
		 * read mutex, so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);

void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from the s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_mutex);
		if (bdev->bd_openers)
			func(bdev, arg);
		mutex_unlock(&bdev->bd_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}