blob: d6baa4f451c5fa8dbaef2181eee795178cdcee69 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * mm/fadvise.c
4 *
5 * Copyright (C) 2002, Linus Torvalds
6 *
Francois Camie1f8e872008-10-15 22:01:59 -07007 * 11Jan2003 Andrew Morton
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 * Initial version.
9 */
10
11#include <linux/kernel.h>
12#include <linux/file.h>
13#include <linux/fs.h>
14#include <linux/mm.h>
15#include <linux/pagemap.h>
16#include <linux/backing-dev.h>
17#include <linux/pagevec.h>
18#include <linux/fadvise.h>
Andrew Mortonebcf28e2006-03-24 03:18:04 -080019#include <linux/writeback.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <linux/syscalls.h>
Mel Gorman67d46b22013-02-22 16:35:59 -080021#include <linux/swap.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022
23#include <asm/unistd.h>
24
Matthew Wilcox (Oracle)cee9a0c2020-06-01 21:46:07 -070025#include "internal.h"
26
Linus Torvalds1da177e2005-04-16 15:20:36 -070027/*
28 * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
29 * deactivate the pages and clear PG_Referenced.
30 */
Dominik Brodowski9d5b7c92018-03-11 11:34:45 +010031
/*
 * generic_fadvise - default implementation of the fadvise64_64() advice
 * @file:   file the advice applies to
 * @offset: start of the byte range the advice covers
 * @len:    length of the range; 0 means "as much as possible" (to EOF)
 * @advice: one of the POSIX_FADV_* constants
 *
 * Returns 0 on success, -ESPIPE for FIFOs, -EINVAL for a negative length,
 * a missing mapping, or an unrecognized advice value.
 */
int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
{
	struct inode *inode;
	struct address_space *mapping;
	struct backing_dev_info *bdi;
	loff_t endbyte;			/* inclusive */
	pgoff_t start_index;
	pgoff_t end_index;
	unsigned long nrpages;

	/* Advice on a pipe/FIFO is meaningless per POSIX. */
	inode = file_inode(file);
	if (S_ISFIFO(inode->i_mode))
		return -ESPIPE;

	mapping = file->f_mapping;
	if (!mapping || len < 0)
		return -EINVAL;

	bdi = inode_to_bdi(mapping->host);

	/*
	 * DAX and no-op-BDI files have no page cache to manage: validate the
	 * advice value but otherwise ignore it.
	 */
	if (IS_DAX(inode) || (bdi == &noop_backing_dev_info)) {
		switch (advice) {
		case POSIX_FADV_NORMAL:
		case POSIX_FADV_RANDOM:
		case POSIX_FADV_SEQUENTIAL:
		case POSIX_FADV_WILLNEED:
		case POSIX_FADV_NOREUSE:
		case POSIX_FADV_DONTNEED:
			/* no bad return value, but ignore advice */
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Careful about overflows. Len == 0 means "as much as possible". Use
	 * unsigned math because signed overflows are undefined and UBSan
	 * complains.
	 */
	endbyte = (u64)offset + (u64)len;
	if (!len || endbyte < len)
		endbyte = -1;		/* wrapped or "to EOF": clamp to max */
	else
		endbyte--;		/* inclusive */

	switch (advice) {
	case POSIX_FADV_NORMAL:
		/* Restore default readahead window and clear random mode. */
		file->f_ra.ra_pages = bdi->ra_pages;
		spin_lock(&file->f_lock);
		file->f_mode &= ~FMODE_RANDOM;
		spin_unlock(&file->f_lock);
		break;
	case POSIX_FADV_RANDOM:
		/* FMODE_RANDOM disables readahead for this file. */
		spin_lock(&file->f_lock);
		file->f_mode |= FMODE_RANDOM;
		spin_unlock(&file->f_lock);
		break;
	case POSIX_FADV_SEQUENTIAL:
		/* Double the readahead window for sequential access. */
		file->f_ra.ra_pages = bdi->ra_pages * 2;
		spin_lock(&file->f_lock);
		file->f_mode &= ~FMODE_RANDOM;
		spin_unlock(&file->f_lock);
		break;
	case POSIX_FADV_WILLNEED:
		/* First and last PARTIAL page! */
		start_index = offset >> PAGE_SHIFT;
		end_index = endbyte >> PAGE_SHIFT;

		/* Careful about overflow on the "+1" */
		nrpages = end_index - start_index + 1;
		if (!nrpages)
			nrpages = ~0UL;

		/* Kick off asynchronous readahead of the whole range. */
		force_page_cache_readahead(mapping, file, start_index, nrpages);
		break;
	case POSIX_FADV_NOREUSE:
		/* Currently a no-op. */
		break;
	case POSIX_FADV_DONTNEED:
		/* Start writeback so dirty pages become invalidatable. */
		if (!inode_write_congested(mapping->host))
			__filemap_fdatawrite_range(mapping, offset, endbyte,
						   WB_SYNC_NONE);

		/*
		 * First and last FULL page! Partial pages are deliberately
		 * preserved on the expectation that it is better to preserve
		 * needed memory than to discard unneeded memory.
		 */
		start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
		end_index = (endbyte >> PAGE_SHIFT);
		/*
		 * The page at end_index will be inclusively discarded according
		 * by invalidate_mapping_pages(), so subtracting 1 from
		 * end_index means we will skip the last page. But if endbyte
		 * is page aligned or is at the end of file, we should not skip
		 * that page - discarding the last page is safe enough.
		 */
		if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK &&
				endbyte != inode->i_size - 1) {
			/* First page is tricky as 0 - 1 = -1, but pgoff_t
			 * is unsigned, so the end_index >= start_index
			 * check below would be true and we'll discard the whole
			 * file cache which is not what was asked.
			 */
			if (end_index == 0)
				break;

			end_index--;
		}

		if (end_index >= start_index) {
			unsigned long nr_pagevec = 0;

			/*
			 * It's common to FADV_DONTNEED right after
			 * the read or write that instantiates the
			 * pages, in which case there will be some
			 * sitting on the local LRU cache. Try to
			 * avoid the expensive remote drain and the
			 * second cache tree walk below by flushing
			 * them out right away.
			 */
			lru_add_drain();

			invalidate_mapping_pagevec(mapping,
						start_index, end_index,
						&nr_pagevec);

			/*
			 * If fewer pages were invalidated than expected then
			 * it is possible that some of the pages were on
			 * a per-cpu pagevec for a remote CPU. Drain all
			 * pagevecs and try again.
			 */
			if (nr_pagevec) {
				lru_add_drain_all();
				invalidate_mapping_pages(mapping, start_index,
						end_index);
			}
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(generic_fadvise);
Amir Goldstein45cd0fa2018-08-27 15:56:02 +0300180
181int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
182{
183 if (file->f_op->fadvise)
184 return file->f_op->fadvise(file, offset, len, advice);
185
186 return generic_fadvise(file, offset, len, advice);
187}
188EXPORT_SYMBOL(vfs_fadvise);
189
Amir Goldstein3d8f7612018-08-29 08:41:29 +0300190#ifdef CONFIG_ADVISE_SYSCALLS
191
Amir Goldstein45cd0fa2018-08-27 15:56:02 +0300192int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
193{
194 struct fd f = fdget(fd);
195 int ret;
196
197 if (!f.file)
198 return -EBADF;
199
200 ret = vfs_fadvise(f.file, offset, len, advice);
201
Al Viro2903ff02012-08-28 12:52:22 -0400202 fdput(f);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 return ret;
204}
205
/*
 * fadvise64_64() syscall: offset and len are both full 64-bit loff_t
 * values; forwards directly to the common helper.
 */
SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
{
	return ksys_fadvise64_64(fd, offset, len, advice);
}
210
Linus Torvalds1da177e2005-04-16 15:20:36 -0700211#ifdef __ARCH_WANT_SYS_FADVISE64
212
/*
 * fadvise64() syscall variant: len is a size_t rather than loff_t; it is
 * widened implicitly when forwarded to the common helper.
 */
SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
{
	return ksys_fadvise64_64(fd, offset, len, advice);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217
218#endif
Amir Goldstein3d8f7612018-08-29 08:41:29 +0300219#endif