// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include "blk.h"

static inline struct inode *bdev_file_inode(struct file *file)
{
        return file->f_mapping->host;
}

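/*
 * The block device's page cache is indexed by device block number, so
 * ->get_block is a trivial 1:1 mapping that cannot fail.
 */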
static int blkdev_get_block(struct inode *inode, sector_t iblock,
                struct buffer_head *bh, int create)
{
        bh->b_bdev = I_BDEV(inode);
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

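/*
 * Direct I/O writes for O_DSYNC are marked FUA, so the data is durable by
 * the time the bio completes and no separate cache flush is required
 * afterwards.
 */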
static unsigned int dio_bio_write_op(struct kiocb *iocb)
{
        unsigned int op = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        /* avoid the need for an I/O completion work item */
        if (iocb->ki_flags & IOCB_DSYNC)
                op |= REQ_FUA;
        return op;
}

#define DIO_INLINE_BIO_VECS 4

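/*
 * Completion handler for the on-stack bio used by the simple path below:
 * clear ->bi_private and wake the submitting task.
 */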
static void blkdev_bio_end_io_simple(struct bio *bio)
{
        struct task_struct *waiter = bio->bi_private;

        WRITE_ONCE(bio->bi_private, NULL);
        blk_wake_io_task(waiter);
}

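/*
 * Synchronous direct I/O that fits in a single bio: the bio lives on the
 * stack and the submitting task waits (or polls) inline for completion.
 */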
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
                struct iov_iter *iter, unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct bio_vec inline_vecs[DIO_INLINE_BIO_VECS], *vecs;
        loff_t pos = iocb->ki_pos;
        bool should_dirty = false;
        struct bio bio;
        ssize_t ret;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        if (nr_pages <= DIO_INLINE_BIO_VECS)
                vecs = inline_vecs;
        else {
                vecs = kmalloc_array(nr_pages, sizeof(struct bio_vec),
                                     GFP_KERNEL);
                if (!vecs)
                        return -ENOMEM;
        }

        bio_init(&bio, vecs, nr_pages);
        bio_set_dev(&bio, bdev);
        bio.bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio.bi_write_hint = iocb->ki_hint;
        bio.bi_private = current;
        bio.bi_end_io = blkdev_bio_end_io_simple;
        bio.bi_ioprio = iocb->ki_ioprio;

        ret = bio_iov_iter_get_pages(&bio, iter);
        if (unlikely(ret))
                goto out;
        ret = bio.bi_iter.bi_size;

        if (iov_iter_rw(iter) == READ) {
                bio.bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter))
                        should_dirty = true;
        } else {
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
        if (iocb->ki_flags & IOCB_NOWAIT)
                bio.bi_opf |= REQ_NOWAIT;
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);

        submit_bio(&bio);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(bio.bi_private))
                        break;
                if (!(iocb->ki_flags & IOCB_HIPRI) || !bio_poll(&bio, NULL, 0))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        bio_release_pages(&bio, should_dirty);
        if (unlikely(bio.bi_status))
                ret = blk_status_to_errno(bio.bi_status);

out:
        if (vecs != inline_vecs)
                kfree(vecs);

        bio_uninit(&bio);

        return ret;
}

enum {
        DIO_SHOULD_DIRTY        = 1,
        DIO_IS_SYNC             = 2,
};

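/*
 * State for direct I/O that needs heap-allocated bios.  The first bio is
 * embedded at the end so the whole structure comes out of blkdev_dio_pool in
 * a single allocation; the union works because sync I/O tracks a waiter task
 * while async I/O tracks the iocb to complete.
 */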
struct blkdev_dio {
        union {
                struct kiocb            *iocb;
                struct task_struct      *waiter;
        };
        size_t                  size;
        atomic_t                ref;
        unsigned int            flags;
        struct bio              bio ____cacheline_aligned_in_smp;
};

static struct bio_set blkdev_dio_pool;

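/*
 * Per-bio completion for the multi-bio path: dropping the last reference
 * either wakes the sync waiter or completes the async iocb.
 */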
static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->flags & DIO_SHOULD_DIRTY;

        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (atomic_dec_and_test(&dio->ref)) {
                if (!(dio->flags & DIO_IS_SYNC)) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        WRITE_ONCE(iocb->private, NULL);

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret);
                        bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

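/*
 * General direct I/O path: split the iterator into bios of up to
 * BIO_MAX_VECS segments each, submitted under a plug and refcounted through
 * the blkdev_dio.
 */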
static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);

        dio = container_of(bio, struct blkdev_dio, bio);
        atomic_set(&dio->ref, 1);
        /*
         * Grab an extra reference to ensure the dio structure embedded in
         * the first bio stays around.
         */
        bio_get(bio);

        is_sync = is_sync_kiocb(iocb);
        if (is_sync) {
                dio->flags = DIO_IS_SYNC;
                dio->waiter = current;
        } else {
                dio->flags = 0;
                dio->iocb = iocb;
        }

        dio->size = 0;
        if (is_read && iter_is_iovec(iter))
                dio->flags |= DIO_SHOULD_DIRTY;

        blk_start_plug(&plug);

        for (;;) {
                bio_set_dev(bio, bdev);
                bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
                bio->bi_write_hint = iocb->ki_hint;
                bio->bi_private = dio;
                bio->bi_end_io = blkdev_bio_end_io;
                bio->bi_ioprio = iocb->ki_ioprio;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                        break;
                }

                if (is_read) {
                        bio->bi_opf = REQ_OP_READ;
                        if (dio->flags & DIO_SHOULD_DIRTY)
                                bio_set_pages_dirty(bio);
                } else {
                        bio->bi_opf = dio_bio_write_op(iocb);
                        task_io_account_write(bio->bi_iter.bi_size);
                }
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;

                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;

                nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
                if (!nr_pages) {
                        submit_bio(bio);
                        break;
                }
                atomic_inc(&dio->ref);
                submit_bio(bio);
                bio = bio_alloc(GFP_KERNEL, nr_pages);
        }

        blk_finish_plug(&plug);

        if (!is_sync)
                return -EIOCBQUEUED;

        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(dio->waiter))
                        break;
                blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

        if (!ret)
                ret = blk_status_to_errno(dio->bio.bi_status);
        if (likely(!ret))
                ret = dio->size;

        bio_put(&dio->bio);
        return ret;
}

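/*
 * Completion for the single-bio async fast path: the iocb is completed
 * straight from bio completion context, with no refcounting needed.
 */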
static void blkdev_bio_end_io_async(struct bio *bio)
{
        struct blkdev_dio *dio = container_of(bio, struct blkdev_dio, bio);
        struct kiocb *iocb = dio->iocb;
        ssize_t ret;

        if (likely(!bio->bi_status)) {
                ret = dio->size;
                iocb->ki_pos += ret;
        } else {
                ret = blk_status_to_errno(bio->bi_status);
        }

        iocb->ki_complete(iocb, ret);

        if (dio->flags & DIO_SHOULD_DIRTY) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}

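/*
 * Async direct I/O that fits in a single bio, so dio->ref is never used:
 * submit and return -EIOCBQUEUED, completion happens in
 * blkdev_bio_end_io_async().
 */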
static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
                                        struct iov_iter *iter,
                                        unsigned int nr_pages)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct blkdev_dio *dio;
        struct bio *bio;
        loff_t pos = iocb->ki_pos;
        int ret = 0;

        if ((pos | iov_iter_alignment(iter)) &
            (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        bio = bio_alloc_kiocb(iocb, nr_pages, &blkdev_dio_pool);
        dio = container_of(bio, struct blkdev_dio, bio);
        dio->flags = 0;
        dio->iocb = iocb;
        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
        bio->bi_write_hint = iocb->ki_hint;
        bio->bi_end_io = blkdev_bio_end_io_async;
        bio->bi_ioprio = iocb->ki_ioprio;

        if (iov_iter_is_bvec(iter)) {
                /*
                 * Users don't rely on the iterator being in any particular
                 * state for async I/O returning -EIOCBQUEUED, hence we can
                 * avoid expensive iov_iter_advance().  Bypass
                 * bio_iov_iter_get_pages() and set the bvec directly.
                 */
                bio_iov_bvec_set(bio, iter);
        } else {
                ret = bio_iov_iter_get_pages(bio, iter);
                if (unlikely(ret)) {
                        bio_put(bio);
                        return ret;
                }
        }
        dio->size = bio->bi_iter.bi_size;

        if (iov_iter_rw(iter) == READ) {
                bio->bi_opf = REQ_OP_READ;
                if (iter_is_iovec(iter)) {
                        dio->flags |= DIO_SHOULD_DIRTY;
                        bio_set_pages_dirty(bio);
                }
        } else {
                bio->bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(bio->bi_iter.bi_size);
        }

        if (iocb->ki_flags & IOCB_HIPRI) {
                bio->bi_opf |= REQ_POLLED | REQ_NOWAIT;
                submit_bio(bio);
                WRITE_ONCE(iocb->private, bio);
        } else {
                if (iocb->ki_flags & IOCB_NOWAIT)
                        bio->bi_opf |= REQ_NOWAIT;
                submit_bio(bio);
        }
        return -EIOCBQUEUED;
}

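/*
 * Dispatch to the cheapest implementation that can handle the request:
 * single-bio sync, single-bio async, or the general multi-bio path.
 */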
static ssize_t blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        unsigned int nr_pages;

        if (!iov_iter_count(iter))
                return 0;

        nr_pages = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS + 1);
        if (likely(nr_pages <= BIO_MAX_VECS)) {
                if (is_sync_kiocb(iocb))
                        return __blkdev_direct_IO_simple(iocb, iter, nr_pages);
                return __blkdev_direct_IO_async(iocb, iter, nr_pages);
        }
        return __blkdev_direct_IO(iocb, iter, bio_max_segs(nr_pages));
}

static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, blkdev_get_block, wbc);
}

static int blkdev_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, blkdev_get_block);
}

static void blkdev_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, blkdev_get_block);
}

static int blkdev_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned flags, struct page **pagep,
                void **fsdata)
{
        return block_write_begin(mapping, pos, len, flags, pagep,
                                 blkdev_get_block);
}

static int blkdev_write_end(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, unsigned copied, struct page *page,
                void *fsdata)
{
        int ret;
        ret = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        unlock_page(page);
        put_page(page);

        return ret;
}

static int blkdev_writepages(struct address_space *mapping,
                             struct writeback_control *wbc)
{
        return generic_writepages(mapping, wbc);
}

const struct address_space_operations def_blk_aops = {
        .set_page_dirty = __set_page_dirty_buffers,
        .readpage       = blkdev_readpage,
        .readahead      = blkdev_readahead,
        .writepage      = blkdev_writepage,
        .write_begin    = blkdev_write_begin,
        .write_end      = blkdev_write_end,
        .writepages     = blkdev_writepages,
        .direct_IO      = blkdev_direct_IO,
        .migratepage    = buffer_migrate_page_norefs,
        .is_dirty_writeback = buffer_check_dirty_writeback,
};

/*
 * For a block special file, file_inode(file)->i_size is zero, so we compute
 * the size by hand (just as in block_read/write above).
 */
static loff_t blkdev_llseek(struct file *file, loff_t offset, int whence)
{
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t retval;

        inode_lock(bd_inode);
        retval = fixed_size_llseek(file, offset, whence, i_size_read(bd_inode));
        inode_unlock(bd_inode);
        return retval;
}

static int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
                int datasync)
{
        struct block_device *bdev = filp->private_data;
        int error;

        error = file_write_and_wait_range(filp, start, end);
        if (error)
                return error;

        /*
         * There is no need to serialise calls to blkdev_issue_flush with
         * i_mutex, and doing so causes performance issues with concurrent
         * O_SYNC writers to a block device.
         */
        error = blkdev_issue_flush(bdev);
        if (error == -EOPNOTSUPP)
                error = 0;

        return error;
}

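/*
 * Translate the O_* open flags into FMODE_* bits and acquire the block
 * device itself via blkdev_get_by_dev().
 */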
static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;

        /*
         * Preserve backwards compatibility and allow large file access even
         * if userspace doesn't ask for it explicitly.  Some mkfs binaries
         * need it.  We might want to drop this workaround during an unstable
         * branch.
         */
        filp->f_flags |= O_LARGEFILE;
        filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

        if (filp->f_flags & O_NDELAY)
                filp->f_mode |= FMODE_NDELAY;
        if (filp->f_flags & O_EXCL)
                filp->f_mode |= FMODE_EXCL;
        if ((filp->f_flags & O_ACCMODE) == 3)
                filp->f_mode |= FMODE_WRITE_IOCTL;

        bdev = blkdev_get_by_dev(inode->i_rdev, filp->f_mode, filp);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        filp->private_data = bdev;
        filp->f_mapping = bdev->bd_inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        return 0;
}

static int blkdev_close(struct inode *inode, struct file *filp)
{
        struct block_device *bdev = filp->private_data;

        blkdev_put(bdev, filp->f_mode);
        return 0;
}

/*
 * Write data to the block device.  Only intended for the block device itself
 * and the raw driver, which basically is a fake block device.
 *
 * Does not take i_mutex for the write and thus is not for general purpose
 * use.
 */
static ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        struct inode *bd_inode = bdev->bd_inode;
        loff_t size = bdev_nr_bytes(bdev);
        struct blk_plug plug;
        size_t shorted = 0;
        ssize_t ret;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev))
                return -ETXTBSY;

        if (!iov_iter_count(from))
                return 0;

        if (iocb->ki_pos >= size)
                return -ENOSPC;

        if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT)
                return -EOPNOTSUPP;

        size -= iocb->ki_pos;
        if (iov_iter_count(from) > size) {
                shorted = iov_iter_count(from) - size;
                iov_iter_truncate(from, size);
        }

        blk_start_plug(&plug);
        ret = __generic_file_write_iter(iocb, from);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        iov_iter_reexpand(from, iov_iter_count(from) + shorted);
        blk_finish_plug(&plug);
        return ret;
}

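/*
 * Reads are clamped to the device size up front so that
 * generic_file_read_iter() never sees a request beyond the end of the
 * device; the iterator is re-expanded afterwards for the caller.
 */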
static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct block_device *bdev = iocb->ki_filp->private_data;
        loff_t size = bdev_nr_bytes(bdev);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret;

        if (unlikely(pos + iov_iter_count(to) > size)) {
                if (pos >= size)
                        return 0;
                size -= pos;
                if (iov_iter_count(to) > size) {
                        shorted = iov_iter_count(to) - size;
                        iov_iter_truncate(to, size);
                }
        }

        ret = generic_file_read_iter(iocb, to);

        if (unlikely(shorted))
                iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;
}

#define BLKDEV_FALLOC_FL_SUPPORTED                                      \
                (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |           \
                 FALLOC_FL_ZERO_RANGE | FALLOC_FL_NO_HIDE_STALE)

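/*
 * fallocate() on a block device turns zeroing and hole punching into
 * zeroout/discard requests, after invalidating the affected page cache
 * range under the mapping's invalidate_lock.
 */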
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
                             loff_t len)
{
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
        loff_t end = start + len - 1;
        loff_t isize;
        int error;

        /* Fail if we don't recognize the flags. */
        if (mode & ~BLKDEV_FALLOC_FL_SUPPORTED)
                return -EOPNOTSUPP;

        /* Don't go off the end of the device. */
        isize = bdev_nr_bytes(bdev);
        if (start >= isize)
                return -EINVAL;
        if (end >= isize) {
                if (mode & FALLOC_FL_KEEP_SIZE) {
                        len = isize - start;
                        end = start + len - 1;
                } else
                        return -EINVAL;
        }

        /*
         * Don't allow IO that isn't aligned to logical block size.
         */
        if ((start | len) & (bdev_logical_block_size(bdev) - 1))
                return -EINVAL;

        filemap_invalidate_lock(inode->i_mapping);

        /* Invalidate the page cache, including dirty pages. */
        error = truncate_bdev_range(bdev, file->f_mode, start, end);
        if (error)
                goto fail;

        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOFALLBACK);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
                error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL, 0);
                break;
        default:
                error = -EOPNOTSUPP;
        }

 fail:
        filemap_invalidate_unlock(inode->i_mapping);
        return error;
}

const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
        .llseek         = blkdev_llseek,
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = iocb_bio_iopoll,
        .mmap           = generic_file_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = blkdev_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = compat_blkdev_ioctl,
#endif
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = blkdev_fallocate,
};

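/*
 * The dio pool embeds the blkdev_dio in front of its bios and enables the
 * per-cpu bio cache used for IOCB_ALLOC_CACHE requests.
 */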
static __init int blkdev_init(void)
{
        return bioset_init(&blkdev_dio_pool, 4,
                           offsetof(struct blkdev_dio, bio),
                           BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE);
}
module_init(blkdev_init);