/*
 * linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hardirq.h>              /* for BUG_ON(!in_atomic()) only */
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h>          /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem              (truncate_pagecache)
 *    ->private_lock            (__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock             (exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem            (truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock   (various, mainly in memory.c)
 *        ->mapping->tree_lock  (arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page               (access_process_vm)
 *
 *  ->i_mutex                   (generic_perform_write)
 *    ->mmap_sem                (fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock                   (fs/fs-writeback.c)
 *    ->mapping->tree_lock      (__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock           (vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock     (anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock               (try_to_unmap_one)
 *    ->private_lock            (try_to_unmap_one)
 *    ->tree_lock               (try_to_unmap_one)
 *    ->zone_lru_lock(zone)     (follow_page->mark_page_accessed)
 *    ->zone_lru_lock(zone)     (check_pte_range->isolate_lru_page)
 *    ->private_lock            (page_remove_rmap->set_page_dirty)
 *    ->tree_lock               (page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock         (page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock           (page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock        (page_remove_rmap->lock_page_memcg)
 *    bdi.wb->list_lock         (zap_pte_range->set_page_dirty)
 *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
 *    ->private_lock            (zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock            (memory_failure, collect_procs_ao)
 */

static int page_cache_tree_insert(struct address_space *mapping,
                                  struct page *page, void **shadowp)
{
        struct radix_tree_node *node;
        void **slot;
        int error;

        error = __radix_tree_create(&mapping->page_tree, page->index, 0,
                                    &node, &slot);
        if (error)
                return error;
        if (*slot) {
                void *p;

                p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
                if (!radix_tree_exceptional_entry(p))
                        return -EEXIST;

                mapping->nrexceptional--;
                if (shadowp)
                        *shadowp = p;
        }
        __radix_tree_replace(&mapping->page_tree, node, slot, page,
                             workingset_update_node, mapping);
        mapping->nrpages++;
        return 0;
}

static void page_cache_tree_delete(struct address_space *mapping,
                                   struct page *page, void *shadow)
{
        int i, nr;

        /* hugetlb pages are represented by one entry in the radix tree */
        nr = PageHuge(page) ? 1 : hpage_nr_pages(page);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(nr != 1 && shadow, page);

        for (i = 0; i < nr; i++) {
                struct radix_tree_node *node;
                void **slot;

                __radix_tree_lookup(&mapping->page_tree, page->index + i,
                                    &node, &slot);

                VM_BUG_ON_PAGE(!node && nr != 1, page);

                radix_tree_clear_tags(&mapping->page_tree, node, slot);
                __radix_tree_replace(&mapping->page_tree, node, slot, shadow,
                                     workingset_update_node, mapping);
        }

        if (shadow) {
                mapping->nrexceptional += nr;
                /*
                 * Make sure the nrexceptional update is committed before
                 * the nrpages update so that final truncate racing
                 * with reclaim does not see both counters 0 at the
                 * same time and miss a shadow entry.
                 */
                smp_wmb();
        }
        mapping->nrpages -= nr;
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe. The caller must hold the mapping's tree_lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
        struct address_space *mapping = page->mapping;
        int nr = hpage_nr_pages(page);

        trace_mm_filemap_delete_from_page_cache(page);
        /*
         * if we're uptodate, flush out into the cleancache, otherwise
         * invalidate any existing cleancache entries. We can't leave
         * stale data around in the cleancache once our page is gone
         */
        if (PageUptodate(page) && PageMappedToDisk(page))
                cleancache_put_page(page);
        else
                cleancache_invalidate_page(mapping, page);

        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(page_mapped(page), page);
        if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
                int mapcount;

                pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
                         current->comm, page_to_pfn(page));
                dump_page(page, "still mapped when deleted");
                dump_stack();
                add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

                mapcount = page_mapcount(page);
                if (mapping_exiting(mapping) &&
                    page_count(page) >= mapcount + 2) {
                        /*
                         * All vmas have already been torn down, so it's
                         * a good bet that actually the page is unmapped,
                         * and we'd prefer not to leak it: if we're wrong,
                         * some other bad page check should catch it later.
                         */
                        page_mapcount_reset(page);
                        page_ref_sub(page, mapcount);
                }
        }

        page_cache_tree_delete(mapping, page, shadow);

        page->mapping = NULL;
        /* Leave page->index set: truncation lookup relies upon it */

        /* hugetlb pages do not participate in page cache accounting. */
        if (PageHuge(page))
                return;

        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        if (PageSwapBacked(page)) {
                __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr);
                if (PageTransHuge(page))
                        __dec_node_page_state(page, NR_SHMEM_THPS);
        } else {
                VM_BUG_ON_PAGE(PageTransHuge(page), page);
        }

        /*
         * At this point page must be either written or cleaned by truncate.
         * Dirty page here signals a bug and loss of unwritten data.
         *
         * This fixes dirty accounting after removing the page entirely but
         * leaves PageDirty set: it has no effect for truncated page and
         * anyway will be cleared before returning page into buddy allocator.
         */
        if (WARN_ON_ONCE(PageDirty(page)))
                account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked. It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;
        void (*freepage)(struct page *);

        BUG_ON(!PageLocked(page));

        freepage = mapping->a_ops->freepage;

        spin_lock_irqsave(&mapping->tree_lock, flags);
        __delete_from_page_cache(page, NULL);
        spin_unlock_irqrestore(&mapping->tree_lock, flags);

        if (freepage)
                freepage(page);

        if (PageTransHuge(page) && !PageHuge(page)) {
                page_ref_sub(page, HPAGE_PMD_NR);
                VM_BUG_ON_PAGE(page_count(page) <= 0, page);
        } else {
                put_page(page);
        }
}
EXPORT_SYMBOL(delete_from_page_cache);
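
/*
 * Example (sketch): the usual caller pattern for the above. The page
 * must be locked and the caller must hold its own reference, which it
 * drops once it is done with the page:
 *
 *      lock_page(page);
 *      if (page->mapping)
 *              delete_from_page_cache(page);
 *      unlock_page(page);
 *      put_page(page);
 */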

int filemap_check_errors(struct address_space *mapping)
{
        int ret = 0;
        /* Check for outstanding write errors */
        if (test_bit(AS_ENOSPC, &mapping->flags) &&
            test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                ret = -ENOSPC;
        if (test_bit(AS_EIO, &mapping->flags) &&
            test_and_clear_bit(AS_EIO, &mapping->flags))
                ret = -EIO;
        return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
        /* Check for outstanding write errors */
        if (test_bit(AS_EIO, &mapping->flags))
                return -EIO;
        if (test_bit(AS_ENOSPC, &mapping->flags))
                return -ENOSPC;
        return 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping: address space structure to write
 * @start: offset in bytes where the range starts
 * @end: offset in bytes where the range ends (inclusive)
 * @sync_mode: enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback. The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                               loff_t end, int sync_mode)
{
        int ret;
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
                .nr_to_write = LONG_MAX,
                .range_start = start,
                .range_end = end,
        };

        if (!mapping_cap_writeback_dirty(mapping))
                return 0;

        wbc_attach_fdatawrite_inode(&wbc, mapping->host);
        ret = do_writepages(mapping, &wbc);
        wbc_detach_inode(&wbc);
        return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
        int sync_mode)
{
        return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                             loff_t end)
{
        return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping: target address_space
 *
 * This is a mostly non-blocking flush. Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
        return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping: address space within which to check
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 */
bool filemap_range_has_page(struct address_space *mapping,
                            loff_t start_byte, loff_t end_byte)
{
        pgoff_t index = start_byte >> PAGE_SHIFT;
        pgoff_t end = end_byte >> PAGE_SHIFT;
        struct page *page;

        if (end_byte < start_byte)
                return false;

        if (mapping->nrpages == 0)
                return false;

        if (!find_get_pages_range(mapping, &index, end, 1, &page))
                return false;
        put_page(page);
        return true;
}
EXPORT_SYMBOL(filemap_range_has_page);
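
/*
 * Example (sketch): a direct I/O write path might use this to decide
 * whether the range it is about to overwrite is still cached and must
 * be flushed first ("pos" and "count" are hypothetical locals):
 *
 *      if (filemap_range_has_page(mapping, pos, pos + count - 1))
 *              err = filemap_write_and_wait_range(mapping, pos,
 *                                                 pos + count - 1);
 */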

static void __filemap_fdatawait_range(struct address_space *mapping,
                                      loff_t start_byte, loff_t end_byte)
{
        pgoff_t index = start_byte >> PAGE_SHIFT;
        pgoff_t end = end_byte >> PAGE_SHIFT;
        struct pagevec pvec;
        int nr_pages;

        if (end_byte < start_byte)
                return;

        pagevec_init(&pvec, 0);
        while (index <= end) {
                unsigned i;

                nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
                                end, PAGECACHE_TAG_WRITEBACK, PAGEVEC_SIZE);
                if (!nr_pages)
                        break;

                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];

                        wait_on_page_writeback(page);
                        ClearPageError(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping: address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them. Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
                            loff_t end_byte)
{
        __filemap_fdatawait_range(mapping, start_byte, end_byte);
        return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file: file pointing to address space structure to wait for
 * @start_byte: offset in bytes where the range starts
 * @end_byte: offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them. Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
        struct address_space *mapping = file->f_mapping;

        __filemap_fdatawait_range(mapping, start_byte, end_byte);
        return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them. Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves. Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
        __filemap_fdatawait_range(mapping, 0, LLONG_MAX);
        return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);

static bool mapping_needs_writeback(struct address_space *mapping)
{
        return (!dax_mapping(mapping) && mapping->nrpages) ||
            (dax_mapping(mapping) && mapping->nrexceptional);
}

int filemap_write_and_wait(struct address_space *mapping)
{
        int err = 0;

        if (mapping_needs_writeback(mapping)) {
                err = filemap_fdatawrite(mapping);
                /*
                 * Even if the above returned error, the pages may be
                 * written partially (e.g. -ENOSPC), so we wait for it.
                 * But -EIO is a special case; it may indicate the worst
                 * thing (e.g. bug) happened, so we avoid waiting for it.
                 */
                if (err != -EIO) {
                        int err2 = filemap_fdatawait(mapping);
                        if (!err)
                                err = err2;
                } else {
                        /* Clear any previously stored errors */
                        filemap_check_errors(mapping);
                }
        } else {
                err = filemap_check_errors(mapping);
        }
        return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping: the address_space for the pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 */
int filemap_write_and_wait_range(struct address_space *mapping,
                                 loff_t lstart, loff_t lend)
{
        int err = 0;

        if (mapping_needs_writeback(mapping)) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                 WB_SYNC_ALL);
                /* See comment of filemap_write_and_wait() */
                if (err != -EIO) {
                        int err2 = filemap_fdatawait_range(mapping,
                                                           lstart, lend);
                        if (!err)
                                err = err2;
                } else {
                        /* Clear any previously stored errors */
                        filemap_check_errors(mapping);
                }
        } else {
                err = filemap_check_errors(mapping);
        }
        return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
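
/*
 * Example (sketch, hypothetical "myfs"): the classic shape of an
 * ->fsync() implementation built on this helper, flushing file data
 * before writing out the filesystem's own metadata
 * (myfs_sync_metadata() is a hypothetical helper):
 *
 *      int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *                     int datasync)
 *      {
 *              int err;
 *
 *              err = filemap_write_and_wait_range(file->f_mapping,
 *                                                 start, end);
 *              if (err)
 *                      return err;
 *              return myfs_sync_metadata(file_inode(file));
 *      }
 */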

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
        errseq_t eseq = errseq_set(&mapping->wb_err, err);

        trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);

/**
 * file_check_and_advance_wb_err - report any wb error that was previously
 *                                 recorded, and advance wb_err to the
 *                                 current value
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (a'la fsync, or NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 */
int file_check_and_advance_wb_err(struct file *file)
{
        int err = 0;
        errseq_t old = READ_ONCE(file->f_wb_err);
        struct address_space *mapping = file->f_mapping;

        /* Locklessly handle the common case where nothing has changed */
        if (errseq_check(&mapping->wb_err, old)) {
                /* Something changed, must use slow path */
                spin_lock(&file->f_lock);
                old = file->f_wb_err;
                err = errseq_check_and_advance(&mapping->wb_err,
                                               &file->f_wb_err);
                trace_file_check_and_advance_wb_err(file, old);
                spin_unlock(&file->f_lock);
        }

        /*
         * We're mostly using this function as a drop in replacement for
         * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
         * that the legacy code would have had on these flags.
         */
        clear_bit(AS_EIO, &mapping->flags);
        clear_bit(AS_ENOSPC, &mapping->flags);
        return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file: file pointing to address_space with pages
 * @lstart: offset in bytes where the range starts
 * @lend: offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
        int err = 0, err2;
        struct address_space *mapping = file->f_mapping;

        if (mapping_needs_writeback(mapping)) {
                err = __filemap_fdatawrite_range(mapping, lstart, lend,
                                                 WB_SYNC_ALL);
                /* See comment of filemap_write_and_wait() */
                if (err != -EIO)
                        __filemap_fdatawait_range(mapping, lstart, lend);
        }
        err2 = file_check_and_advance_wb_err(file);
        if (!err)
                err = err2;
        return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);
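
/*
 * Example (sketch): the errseq_t-aware variant of the ->fsync() shape
 * shown earlier; using the file-based helper means writeback errors
 * hit by other writers since this descriptor's last fsync are reported
 * here too ("myfs" names are hypothetical):
 *
 *      int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *                     int datasync)
 *      {
 *              int err = file_write_and_wait_range(file, start, end);
 *
 *              if (err)
 *                      return err;
 *              return myfs_sync_metadata(file_inode(file));
 *      }
 */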

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old: page to be replaced
 * @new: page to replace with
 * @gfp_mask: allocation mode
 *
 * This function replaces a page in the pagecache with a new one. On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page. Both the old and new pages must be
 * locked. This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic. The only way this function can fail is
 * memory allocation failure.
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
        int error;

        VM_BUG_ON_PAGE(!PageLocked(old), old);
        VM_BUG_ON_PAGE(!PageLocked(new), new);
        VM_BUG_ON_PAGE(new->mapping, new);

        error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (!error) {
                struct address_space *mapping = old->mapping;
                void (*freepage)(struct page *);
                unsigned long flags;

                pgoff_t offset = old->index;
                freepage = mapping->a_ops->freepage;

                get_page(new);
                new->mapping = mapping;
                new->index = offset;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                __delete_from_page_cache(old, NULL);
                error = page_cache_tree_insert(mapping, new, NULL);
                BUG_ON(error);

                /*
                 * hugetlb pages do not participate in page cache accounting.
                 */
                if (!PageHuge(new))
                        __inc_node_page_state(new, NR_FILE_PAGES);
                if (PageSwapBacked(new))
                        __inc_node_page_state(new, NR_SHMEM);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
                mem_cgroup_migrate(old, new);
                radix_tree_preload_end();
                if (freepage)
                        freepage(old);
                put_page(old);
        }

        return error;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
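
/*
 * Example (sketch, modeled on the fs/fuse page-stealing code): both
 * pages are locked, and the caller adds the new page to the LRU by
 * hand because this function deliberately does not do that:
 *
 *      err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
 *      if (!err)
 *              lru_cache_add_file(newpage);
 */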

static int __add_to_page_cache_locked(struct page *page,
                                      struct address_space *mapping,
                                      pgoff_t offset, gfp_t gfp_mask,
                                      void **shadowp)
{
        int huge = PageHuge(page);
        struct mem_cgroup *memcg;
        int error;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapBacked(page), page);

        if (!huge) {
                error = mem_cgroup_try_charge(page, current->mm,
                                              gfp_mask, &memcg, false);
                if (error)
                        return error;
        }

        error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (error) {
                if (!huge)
                        mem_cgroup_cancel_charge(page, memcg, false);
                return error;
        }

        get_page(page);
        page->mapping = mapping;
        page->index = offset;

        spin_lock_irq(&mapping->tree_lock);
        error = page_cache_tree_insert(mapping, page, shadowp);
        radix_tree_preload_end();
        if (unlikely(error))
                goto err_insert;

        /* hugetlb pages do not participate in page cache accounting. */
        if (!huge)
                __inc_node_page_state(page, NR_FILE_PAGES);
        spin_unlock_irq(&mapping->tree_lock);
        if (!huge)
                mem_cgroup_commit_charge(page, memcg, false, false);
        trace_mm_filemap_add_to_page_cache(page);
        return 0;
err_insert:
        page->mapping = NULL;
        /* Leave page->index set: truncation relies upon it */
        spin_unlock_irq(&mapping->tree_lock);
        if (!huge)
                mem_cgroup_cancel_charge(page, memcg, false);
        put_page(page);
        return error;
}

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page: page to add
 * @mapping: the page's address_space
 * @offset: page index
 * @gfp_mask: page allocation mode
 *
 * This function is used to add a page to the pagecache. It must be locked.
 * This function does not add the page to the LRU. The caller must do that.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                             pgoff_t offset, gfp_t gfp_mask)
{
        return __add_to_page_cache_locked(page, mapping, offset,
                                          gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                          pgoff_t offset, gfp_t gfp_mask)
{
        void *shadow = NULL;
        int ret;

        __SetPageLocked(page);
        ret = __add_to_page_cache_locked(page, mapping, offset,
                                         gfp_mask, &shadow);
        if (unlikely(ret))
                __ClearPageLocked(page);
        else {
                /*
                 * The page might have been evicted from cache only
                 * recently, in which case it should be activated like
                 * any other repeatedly accessed page.
                 * The exception is pages getting rewritten; evicting other
                 * data from the working set, only to cache data that will
                 * get overwritten with something else, is a waste of memory.
                 */
                if (!(gfp_mask & __GFP_WRITE) &&
                    shadow && workingset_refault(shadow)) {
                        SetPageActive(page);
                        workingset_activation(page);
                } else
                        ClearPageActive(page);
                lru_cache_add(page);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
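
/*
 * Example (sketch): the common read-path pattern for populating the
 * cache - allocate a fresh page, insert it locked, then start the
 * read. An -EEXIST return means somebody else inserted a page first,
 * and the caller simply retries the lookup ("file" and "index" are
 * hypothetical locals):
 *
 *      page = __page_cache_alloc(gfp_mask);
 *      if (!page)
 *              return -ENOMEM;
 *      err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
 *      if (err) {
 *              put_page(page);
 *              return err;
 *      }
 *      err = mapping->a_ops->readpage(file, page);
 */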

#ifdef CONFIG_NUMA
struct page *__page_cache_alloc(gfp_t gfp)
{
        int n;
        struct page *page;

        if (cpuset_do_page_mem_spread()) {
                unsigned int cpuset_mems_cookie;
                do {
                        cpuset_mems_cookie = read_mems_allowed_begin();
                        n = cpuset_mem_spread_node();
                        page = __alloc_pages_node(n, gfp, 0);
                } while (!page && read_mems_allowed_retry(cpuset_mems_cookie));

                return page;
        }
        return alloc_pages(gfp, 0);
}
EXPORT_SYMBOL(__page_cache_alloc);
#endif

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *page_waitqueue(struct page *page)
{
        return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
        int i;

        for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
                init_waitqueue_head(&page_wait_table[i]);

        page_writeback_init();
}

/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
struct wait_page_key {
        struct page *page;
        int bit_nr;
        int page_match;
};

struct wait_page_queue {
        struct page *page;
        int bit_nr;
        wait_queue_entry_t wait;
};

static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
        struct wait_page_key *key = arg;
        struct wait_page_queue *wait_page
                = container_of(wait, struct wait_page_queue, wait);

        if (wait_page->page != key->page)
                return 0;
        key->page_match = 1;

        if (wait_page->bit_nr != key->bit_nr)
                return 0;

        /* Stop walking if it's locked */
        if (test_bit(key->bit_nr, &key->page->flags))
                return -1;

        return autoremove_wake_function(wait, mode, sync, key);
}

static void wake_up_page_bit(struct page *page, int bit_nr)
{
        wait_queue_head_t *q = page_waitqueue(page);
        struct wait_page_key key;
        unsigned long flags;
        wait_queue_entry_t bookmark;

        key.page = page;
        key.bit_nr = bit_nr;
        key.page_match = 0;

        bookmark.flags = 0;
        bookmark.private = NULL;
        bookmark.func = NULL;
        INIT_LIST_HEAD(&bookmark.entry);

        spin_lock_irqsave(&q->lock, flags);
        __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);

        while (bookmark.flags & WQ_FLAG_BOOKMARK) {
                /*
                 * Take a breather from holding the lock,
                 * allow pages that finish wake up asynchronously
                 * to acquire the lock and remove themselves
                 * from wait queue
                 */
                spin_unlock_irqrestore(&q->lock, flags);
                cpu_relax();
                spin_lock_irqsave(&q->lock, flags);
                __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
        }

        /*
         * It is possible for other pages to have collided on the waitqueue
         * hash, so in that case check for a page match. That prevents a
         * long-term waiter.
         *
         * It is still possible to miss a case here, when we woke page waiters
         * and removed them from the waitqueue, but there are still other
         * page waiters.
         */
        if (!waitqueue_active(q) || !key.page_match) {
                ClearPageWaiters(page);
                /*
                 * It's possible to miss clearing Waiters here, when we woke
                 * our page waiters, but the hashed waitqueue has waiters for
                 * other pages on it.
                 *
                 * That's okay, it's a rare case. The next waker will clear it.
                 */
        }
        spin_unlock_irqrestore(&q->lock, flags);
}

static void wake_up_page(struct page *page, int bit)
{
        if (!PageWaiters(page))
                return;
        wake_up_page_bit(page, bit);
}

static inline int wait_on_page_bit_common(wait_queue_head_t *q,
                struct page *page, int bit_nr, int state, bool lock)
{
        struct wait_page_queue wait_page;
        wait_queue_entry_t *wait = &wait_page.wait;
        int ret = 0;

        init_wait(wait);
        wait->flags = lock ? WQ_FLAG_EXCLUSIVE : 0;
        wait->func = wake_page_function;
        wait_page.page = page;
        wait_page.bit_nr = bit_nr;

        for (;;) {
                spin_lock_irq(&q->lock);

                if (likely(list_empty(&wait->entry))) {
                        __add_wait_queue_entry_tail(q, wait);
                        SetPageWaiters(page);
                }

                set_current_state(state);

                spin_unlock_irq(&q->lock);

                if (likely(test_bit(bit_nr, &page->flags))) {
                        io_schedule();
                }

                if (lock) {
                        if (!test_and_set_bit_lock(bit_nr, &page->flags))
                                break;
                } else {
                        if (!test_bit(bit_nr, &page->flags))
                                break;
                }

                if (unlikely(signal_pending_state(state, current))) {
                        ret = -EINTR;
                        break;
                }
        }

        finish_wait(q, wait);

        /*
         * A signal could leave PageWaiters set. Clearing it here if
         * !waitqueue_active would be possible (by open-coding finish_wait),
         * but still fail to catch it in the case of wait hash collision. We
         * already can fail to clear wait hash collision cases, so don't
         * bother with signals either.
         */

        return ret;
}

void wait_on_page_bit(struct page *page, int bit_nr)
{
        wait_queue_head_t *q = page_waitqueue(page);
        wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false);
}
EXPORT_SYMBOL(wait_on_page_bit);

int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
        wait_queue_head_t *q = page_waitqueue(page);
        return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false);
}

/**
 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
 * @page: Page defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @page.
 */
void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
{
        wait_queue_head_t *q = page_waitqueue(page);
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        __add_wait_queue_entry_tail(q, waiter);
        SetPageWaiters(page);
        spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

#ifndef clear_bit_unlock_is_negative_byte

/*
 * PG_waiters is the high bit in the same byte as PG_lock.
 *
 * On x86 (and on many other architectures), we can clear PG_lock and
 * test the sign bit at the same time. But if the architecture does
 * not support that special operation, we just do this all by hand
 * instead.
 *
 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 * being cleared, but a memory barrier should be unnecessary since it is
 * in the same byte as PG_locked.
 */
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
        clear_bit_unlock(nr, mem);
        /* smp_mb__after_atomic(); */
        return test_bit(PG_waiters, mem);
}

#endif

/**
 * unlock_page - unlock a locked page
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * Note that this depends on PG_waiters being the sign bit in the byte
 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
 * clear the PG_locked bit and test PG_waiters at the same time fairly
 * portably (architectures that do LL/SC can test any bit, while x86 can
 * test the sign bit).
 */
void unlock_page(struct page *page)
{
        BUILD_BUG_ON(PG_waiters != 7);
        page = compound_head(page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
                wake_up_page_bit(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);
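
/*
 * Example (sketch): the canonical lock/inspect/unlock pattern these
 * wakeups serve; every lock_page() (or successful trylock_page())
 * must be paired with exactly one unlock_page():
 *
 *      lock_page(page);
 *      ... inspect or update page state under PG_locked ...
 *      unlock_page(page);
 */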

/**
 * end_page_writeback - end writeback against a page
 * @page: the page
 */
void end_page_writeback(struct page *page)
{
        /*
         * TestClearPageReclaim could be used here but it is an atomic
         * operation and overkill in this particular case. Failing to
         * shuffle a page marked for immediate reclaim is too mild to
         * justify taking an atomic operation penalty at the end of
         * every page writeback.
         */
        if (PageReclaim(page)) {
                ClearPageReclaim(page);
                rotate_reclaimable_page(page);
        }

        if (!test_clear_page_writeback(page))
                BUG();

        smp_mb__after_atomic();
        wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * After completing I/O on a page, call this routine to update the page
 * flags appropriately
 */
void page_endio(struct page *page, bool is_write, int err)
{
        if (!is_write) {
                if (!err) {
                        SetPageUptodate(page);
                } else {
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
                unlock_page(page);
        } else {
                if (err) {
                        struct address_space *mapping;

                        SetPageError(page);
                        mapping = page_mapping(page);
                        if (mapping)
                                mapping_set_error(mapping, err);
                }
                end_page_writeback(page);
        }
}
EXPORT_SYMBOL_GPL(page_endio);
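
/*
 * Example (sketch, modeled on mpage_end_io() in fs/mpage.c): a bio
 * completion handler finishing every page carried by the bio:
 *
 *      static void example_end_io(struct bio *bio)
 *      {
 *              struct bio_vec *bv;
 *              int i;
 *
 *              bio_for_each_segment_all(bv, bio, i)
 *                      page_endio(bv->bv_page, op_is_write(bio_op(bio)),
 *                                 blk_status_to_errno(bio->bi_status));
 *              bio_put(bio);
 *      }
 */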

/**
 * __lock_page - get a lock on the page, assuming we need to sleep to get it
 * @__page: the page to lock
 */
void __lock_page(struct page *__page)
{
        struct page *page = compound_head(__page);
        wait_queue_head_t *q = page_waitqueue(page);
        wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true);
}
EXPORT_SYMBOL(__lock_page);

int __lock_page_killable(struct page *__page)
{
        struct page *page = compound_head(__page);
        wait_queue_head_t *q = page_waitqueue(page);
        return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true);
}
EXPORT_SYMBOL_GPL(__lock_page_killable);

Paul Cassella9a95f3c2014-08-06 16:07:24 -07001184/*
1185 * Return values:
1186 * 1 - page is locked; mmap_sem is still held.
1187 * 0 - page is not locked.
1188 * mmap_sem has been released (up_read()), unless flags had both
1189 * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
1190 * which case mmap_sem is still held.
1191 *
1192 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
1193 * with the page locked and the mmap_sem unperturbed.
1194 */
Michel Lespinassed065bd82010-10-26 14:21:57 -07001195int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
1196 unsigned int flags)
1197{
KOSAKI Motohiro37b23e02011-05-24 17:11:30 -07001198 if (flags & FAULT_FLAG_ALLOW_RETRY) {
1199 /*
1200 * CAUTION! In this case, mmap_sem is not released
1201 * even though return 0.
1202 */
1203 if (flags & FAULT_FLAG_RETRY_NOWAIT)
1204 return 0;
1205
1206 up_read(&mm->mmap_sem);
1207 if (flags & FAULT_FLAG_KILLABLE)
1208 wait_on_page_locked_killable(page);
1209 else
Gleb Natapov318b2752011-03-22 16:30:51 -07001210 wait_on_page_locked(page);
Michel Lespinassed065bd82010-10-26 14:21:57 -07001211 return 0;
KOSAKI Motohiro37b23e02011-05-24 17:11:30 -07001212 } else {
1213 if (flags & FAULT_FLAG_KILLABLE) {
1214 int ret;
1215
1216 ret = __lock_page_killable(page);
1217 if (ret) {
1218 up_read(&mm->mmap_sem);
1219 return 0;
1220 }
1221 } else
1222 __lock_page(page);
1223 return 1;
Michel Lespinassed065bd82010-10-26 14:21:57 -07001224 }
1225}
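
/*
 * Illustrative sketch, not in the original source: how a fault handler
 * typically consumes the return value of lock_page_or_retry().  This
 * mirrors the pattern used by filemap_fault() further down; the helper
 * itself is hypothetical.
 */
static int __maybe_unused example_fault_lock(struct page *page,
					     struct vm_fault *vmf)
{
	if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
		put_page(page);
		/* mmap_sem may already have been dropped, see above */
		return VM_FAULT_RETRY;
	}
	/* page is locked and mmap_sem is still held */
	return 0;
}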
1226
Randy Dunlap485bb992006-06-23 02:03:49 -07001227/**
Johannes Weinere7b563b2014-04-03 14:47:44 -07001228 * page_cache_next_hole - find the next hole (not-present entry)
1229 * @mapping: mapping
1230 * @index: index
1231 * @max_scan: maximum range to search
1232 *
1233 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
1234 * lowest indexed hole.
1235 *
1236 * Returns: the index of the hole if found, otherwise returns an index
1237 * outside of the set specified (in which case 'return - index >=
1238 * max_scan' will be true). In rare cases of index wrap-around, 0 will
1239 * be returned.
1240 *
1241 * page_cache_next_hole may be called under rcu_read_lock. However,
1242 * like radix_tree_gang_lookup, this will not atomically search a
1243 * snapshot of the tree at a single point in time. For example, if a
1244 * hole is created at index 5, then subsequently a hole is created at
1245 * index 10, page_cache_next_hole covering both indexes may return 10
1246 * if called under rcu_read_lock.
1247 */
1248pgoff_t page_cache_next_hole(struct address_space *mapping,
1249 pgoff_t index, unsigned long max_scan)
1250{
1251 unsigned long i;
1252
1253 for (i = 0; i < max_scan; i++) {
Johannes Weiner0cd61442014-04-03 14:47:46 -07001254 struct page *page;
1255
1256 page = radix_tree_lookup(&mapping->page_tree, index);
1257 if (!page || radix_tree_exceptional_entry(page))
Johannes Weinere7b563b2014-04-03 14:47:44 -07001258 break;
1259 index++;
1260 if (index == 0)
1261 break;
1262 }
1263
1264 return index;
1265}
1266EXPORT_SYMBOL(page_cache_next_hole);
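
/*
 * Illustrative sketch (hypothetical helper, not in the original file):
 * probing for the first non-present index at or after @from, limited to
 * @max entries, and mapping the "no hole found" contract above onto a
 * sentinel value.
 */
static pgoff_t __maybe_unused example_next_hole(struct address_space *mapping,
						pgoff_t from, unsigned long max)
{
	pgoff_t hole = page_cache_next_hole(mapping, from, max);

	/* per the contract above, 'return - index >= max_scan' = no hole */
	return (hole - from >= max) ? (pgoff_t)-1 : hole;
}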
1267
1268/**
1269 * page_cache_prev_hole - find the prev hole (not-present entry)
1270 * @mapping: mapping
1271 * @index: index
1272 * @max_scan: maximum range to search
1273 *
1274 * Search backwards in the range [max(index-max_scan+1, 0), index] for
1275 * the first hole.
1276 *
1277 * Returns: the index of the hole if found, otherwise returns an index
1278 * outside of the set specified (in which case 'index - return >=
1279 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
1280 * will be returned.
1281 *
1282 * page_cache_prev_hole may be called under rcu_read_lock. However,
1283 * like radix_tree_gang_lookup, this will not atomically search a
1284 * snapshot of the tree at a single point in time. For example, if a
1285 * hole is created at index 10, then subsequently a hole is created at
1286 * index 5, page_cache_prev_hole covering both indexes may return 5 if
1287 * called under rcu_read_lock.
1288 */
1289pgoff_t page_cache_prev_hole(struct address_space *mapping,
1290 pgoff_t index, unsigned long max_scan)
1291{
1292 unsigned long i;
1293
1294 for (i = 0; i < max_scan; i++) {
Johannes Weiner0cd61442014-04-03 14:47:46 -07001295 struct page *page;
1296
1297 page = radix_tree_lookup(&mapping->page_tree, index);
1298 if (!page || radix_tree_exceptional_entry(page))
Johannes Weinere7b563b2014-04-03 14:47:44 -07001299 break;
1300 index--;
1301 if (index == ULONG_MAX)
1302 break;
1303 }
1304
1305 return index;
1306}
1307EXPORT_SYMBOL(page_cache_prev_hole);
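
/*
 * Illustrative sketch (hypothetical helper): the backwards twin of the
 * probe above, reporting the closest non-present index at or below
 * @from, again translating the documented failure contract.
 */
static pgoff_t __maybe_unused example_prev_hole(struct address_space *mapping,
						pgoff_t from, unsigned long max)
{
	pgoff_t hole = page_cache_prev_hole(mapping, from, max);

	/* per the contract above, 'index - return >= max_scan' = no hole */
	return (from - hole >= max) ? (pgoff_t)-1 : hole;
}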
1308
1309/**
Johannes Weiner0cd61442014-04-03 14:47:46 -07001310 * find_get_entry - find and get a page cache entry
Randy Dunlap485bb992006-06-23 02:03:49 -07001311 * @mapping: the address_space to search
Johannes Weiner0cd61442014-04-03 14:47:46 -07001312 * @offset: the page cache index
Randy Dunlap485bb992006-06-23 02:03:49 -07001313 *
Johannes Weiner0cd61442014-04-03 14:47:46 -07001314 * Looks up the page cache slot at @mapping & @offset. If there is a
1315 * page cache page, it is returned with an increased refcount.
1316 *
Johannes Weiner139b6a62014-05-06 12:50:05 -07001317 * If the slot holds a shadow entry of a previously evicted page, or a
1318 * swap entry from shmem/tmpfs, it is returned.
Johannes Weiner0cd61442014-04-03 14:47:46 -07001319 *
1320 * Otherwise, %NULL is returned.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321 */
Johannes Weiner0cd61442014-04-03 14:47:46 -07001322struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323{
Nick Piggina60637c2008-07-25 19:45:31 -07001324 void **pagep;
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001325 struct page *head, *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326
Nick Piggina60637c2008-07-25 19:45:31 -07001327 rcu_read_lock();
1328repeat:
1329 page = NULL;
1330 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
1331 if (pagep) {
1332 page = radix_tree_deref_slot(pagep);
Nick Piggin27d20fd2010-11-11 14:05:19 -08001333 if (unlikely(!page))
1334 goto out;
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001335 if (radix_tree_exception(page)) {
Hugh Dickins8079b1c2011-08-03 16:21:28 -07001336 if (radix_tree_deref_retry(page))
1337 goto repeat;
1338 /*
Johannes Weiner139b6a62014-05-06 12:50:05 -07001339 * A shadow entry of a recently evicted page,
1340 * or a swap entry from shmem/tmpfs. Return
1341 * it without attempting to raise page count.
Hugh Dickins8079b1c2011-08-03 16:21:28 -07001342 */
1343 goto out;
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001344 }
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001345
1346 head = compound_head(page);
1347 if (!page_cache_get_speculative(head))
Nick Piggina60637c2008-07-25 19:45:31 -07001348 goto repeat;
1349
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001350 /* The page was split under us? */
1351 if (compound_head(page) != head) {
1352 put_page(head);
1353 goto repeat;
1354 }
1355
Nick Piggina60637c2008-07-25 19:45:31 -07001356 /*
1357 * Has the page moved?
1358 * This is part of the lockless pagecache protocol. See
1359 * include/linux/pagemap.h for details.
1360 */
1361 if (unlikely(page != *pagep)) {
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001362 put_page(head);
Nick Piggina60637c2008-07-25 19:45:31 -07001363 goto repeat;
1364 }
1365 }
Nick Piggin27d20fd2010-11-11 14:05:19 -08001366out:
Nick Piggina60637c2008-07-25 19:45:31 -07001367 rcu_read_unlock();
1368
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 return page;
1370}
Johannes Weiner0cd61442014-04-03 14:47:46 -07001371EXPORT_SYMBOL(find_get_entry);
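
/*
 * Illustrative sketch (hypothetical caller): find_get_entry() can hand
 * back an exceptional entry, and only a real page carries a reference
 * that must be dropped.
 */
static void __maybe_unused example_probe_entry(struct address_space *mapping,
					       pgoff_t offset)
{
	struct page *page = find_get_entry(mapping, offset);

	if (!page)
		return;			/* nothing cached */
	if (radix_tree_exceptional_entry(page))
		return;			/* shadow/swap entry, no ref taken */
	/* ... inspect the page here ... */
	put_page(page);
}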
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372
Randy Dunlap485bb992006-06-23 02:03:49 -07001373/**
Johannes Weiner0cd61442014-04-03 14:47:46 -07001374 * find_lock_entry - locate, pin and lock a page cache entry
1375 * @mapping: the address_space to search
1376 * @offset: the page cache index
1377 *
1378 * Looks up the page cache slot at @mapping & @offset. If there is a
1379 * page cache page, it is returned locked and with an increased
1380 * refcount.
1381 *
Johannes Weiner139b6a62014-05-06 12:50:05 -07001382 * If the slot holds a shadow entry of a previously evicted page, or a
1383 * swap entry from shmem/tmpfs, it is returned.
Johannes Weiner0cd61442014-04-03 14:47:46 -07001384 *
1385 * Otherwise, %NULL is returned.
1386 *
1387 * find_lock_entry() may sleep.
1388 */
1389struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001390{
1391 struct page *page;
1392
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393repeat:
Johannes Weiner0cd61442014-04-03 14:47:46 -07001394 page = find_get_entry(mapping, offset);
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001395 if (page && !radix_tree_exception(page)) {
Nick Piggina60637c2008-07-25 19:45:31 -07001396 lock_page(page);
1397 /* Has the page been truncated? */
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001398 if (unlikely(page_mapping(page) != mapping)) {
Nick Piggina60637c2008-07-25 19:45:31 -07001399 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001400 put_page(page);
Nick Piggina60637c2008-07-25 19:45:31 -07001401 goto repeat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 }
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001403 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 return page;
1406}
Johannes Weiner0cd61442014-04-03 14:47:46 -07001407EXPORT_SYMBOL(find_lock_entry);
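
/*
 * Illustrative sketch: most callers go through the find_lock_page()
 * wrapper from pagemap.h, which filters out the exceptional entries so
 * the result is either NULL or a locked, referenced page.
 */
static void __maybe_unused example_with_locked_page(struct address_space *mapping,
						    pgoff_t offset)
{
	struct page *page = find_lock_page(mapping, offset);

	if (!page)
		return;
	/* page is locked and referenced here */
	unlock_page(page);
	put_page(page);
}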
1408
1409/**
Mel Gorman2457aec2014-06-04 16:10:31 -07001410 * pagecache_get_page - find and get a page reference
Johannes Weiner0cd61442014-04-03 14:47:46 -07001411 * @mapping: the address_space to search
1412 * @offset: the page index
Mel Gorman2457aec2014-06-04 16:10:31 -07001413 * @fgp_flags: FGP flags
Michal Hocko45f87de2014-12-29 20:30:35 +01001414 * @gfp_mask: gfp mask to use for the page cache data page allocation
Johannes Weiner0cd61442014-04-03 14:47:46 -07001415 *
Mel Gorman2457aec2014-06-04 16:10:31 -07001416 * Looks up the page cache slot at @mapping & @offset.
Johannes Weiner0cd61442014-04-03 14:47:46 -07001417 *
Randy Dunlap75325182014-07-30 16:08:37 -07001418 * FGP flags modify how the page is returned.
Johannes Weiner0cd61442014-04-03 14:47:46 -07001419 *
mchehab@s-opensource.com0e056eb2017-03-30 17:11:36 -03001420 * @fgp_flags can be:
1421 *
1422 * - FGP_ACCESSED: the page will be marked accessed
1423 *   - FGP_LOCK: Page is returned locked
1424 * - FGP_CREAT: If page is not present then a new page is allocated using
1425 * @gfp_mask and added to the page cache and the VM's LRU
1426 * list. The page is returned locked and with an increased
1427 * refcount. Otherwise, NULL is returned.
Mel Gorman2457aec2014-06-04 16:10:31 -07001428 *
1429 * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
1430 * if the GFP flags specified for FGP_CREAT are atomic.
1431 *
1432 * If there is a page cache page, it is returned with an increased refcount.
Johannes Weiner0cd61442014-04-03 14:47:46 -07001433 */
Mel Gorman2457aec2014-06-04 16:10:31 -07001434struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
Michal Hocko45f87de2014-12-29 20:30:35 +01001435 int fgp_flags, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436{
Nick Piggineb2be182007-10-16 01:24:57 -07001437 struct page *page;
Mel Gorman2457aec2014-06-04 16:10:31 -07001438
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439repeat:
Mel Gorman2457aec2014-06-04 16:10:31 -07001440 page = find_get_entry(mapping, offset);
1441 if (radix_tree_exceptional_entry(page))
1442 page = NULL;
1443 if (!page)
1444 goto no_page;
1445
1446 if (fgp_flags & FGP_LOCK) {
1447 if (fgp_flags & FGP_NOWAIT) {
1448 if (!trylock_page(page)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001449 put_page(page);
Mel Gorman2457aec2014-06-04 16:10:31 -07001450 return NULL;
1451 }
1452 } else {
1453 lock_page(page);
1454 }
1455
1456 /* Has the page been truncated? */
1457 if (unlikely(page->mapping != mapping)) {
1458 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001459 put_page(page);
Mel Gorman2457aec2014-06-04 16:10:31 -07001460 goto repeat;
1461 }
1462 VM_BUG_ON_PAGE(page->index != offset, page);
1463 }
1464
1465 if (page && (fgp_flags & FGP_ACCESSED))
1466 mark_page_accessed(page);
1467
1468no_page:
1469 if (!page && (fgp_flags & FGP_CREAT)) {
1470 int err;
1471 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
Michal Hocko45f87de2014-12-29 20:30:35 +01001472 gfp_mask |= __GFP_WRITE;
1473 if (fgp_flags & FGP_NOFS)
1474 gfp_mask &= ~__GFP_FS;
Mel Gorman2457aec2014-06-04 16:10:31 -07001475
Michal Hocko45f87de2014-12-29 20:30:35 +01001476 page = __page_cache_alloc(gfp_mask);
Nick Piggineb2be182007-10-16 01:24:57 -07001477 if (!page)
1478 return NULL;
Mel Gorman2457aec2014-06-04 16:10:31 -07001479
1480 if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
1481 fgp_flags |= FGP_LOCK;
1482
Hugh Dickinseb39d612014-08-06 16:06:43 -07001483		/* Init accessed so we avoid an atomic mark_page_accessed later */
Mel Gorman2457aec2014-06-04 16:10:31 -07001484 if (fgp_flags & FGP_ACCESSED)
Hugh Dickinseb39d612014-08-06 16:06:43 -07001485 __SetPageReferenced(page);
Mel Gorman2457aec2014-06-04 16:10:31 -07001486
Michal Hocko45f87de2014-12-29 20:30:35 +01001487 err = add_to_page_cache_lru(page, mapping, offset,
1488 gfp_mask & GFP_RECLAIM_MASK);
Nick Piggineb2be182007-10-16 01:24:57 -07001489 if (unlikely(err)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001490 put_page(page);
Nick Piggineb2be182007-10-16 01:24:57 -07001491 page = NULL;
1492 if (err == -EEXIST)
1493 goto repeat;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 }
Mel Gorman2457aec2014-06-04 16:10:31 -07001496
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 return page;
1498}
Mel Gorman2457aec2014-06-04 16:10:31 -07001499EXPORT_SYMBOL(pagecache_get_page);
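
/*
 * Illustrative sketch: FGP_LOCK | FGP_ACCESSED | FGP_CREAT gives the
 * classic find-or-create behaviour; this mirrors how the
 * find_or_create_page() wrapper in pagemap.h is built on top of
 * pagecache_get_page().
 */
static __maybe_unused struct page *
example_find_or_create(struct address_space *mapping, pgoff_t index,
		       gfp_t gfp)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp);
}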
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500
1501/**
Johannes Weiner0cd61442014-04-03 14:47:46 -07001502 * find_get_entries - gang pagecache lookup
1503 * @mapping: The address_space to search
1504 * @start: The starting page cache index
1505 * @nr_entries: The maximum number of entries
1506 * @entries: Where the resulting entries are placed
1507 * @indices: The cache indices corresponding to the entries in @entries
1508 *
1509 * find_get_entries() will search for and return a group of up to
1510 * @nr_entries entries in the mapping. The entries are placed at
1511 * @entries. find_get_entries() takes a reference against any actual
1512 * pages it returns.
1513 *
1514 * The search returns a group of mapping-contiguous page cache entries
1515 * with ascending indexes. There may be holes in the indices due to
1516 * not-present pages.
1517 *
Johannes Weiner139b6a62014-05-06 12:50:05 -07001518 * Any shadow entries of evicted pages, or swap entries from
1519 * shmem/tmpfs, are included in the returned array.
Johannes Weiner0cd61442014-04-03 14:47:46 -07001520 *
1521 * find_get_entries() returns the number of pages and shadow entries
1522 * which were found.
1523 */
1524unsigned find_get_entries(struct address_space *mapping,
1525 pgoff_t start, unsigned int nr_entries,
1526 struct page **entries, pgoff_t *indices)
1527{
1528 void **slot;
1529 unsigned int ret = 0;
1530 struct radix_tree_iter iter;
1531
1532 if (!nr_entries)
1533 return 0;
1534
1535 rcu_read_lock();
Johannes Weiner0cd61442014-04-03 14:47:46 -07001536 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001537 struct page *head, *page;
Johannes Weiner0cd61442014-04-03 14:47:46 -07001538repeat:
1539 page = radix_tree_deref_slot(slot);
1540 if (unlikely(!page))
1541 continue;
1542 if (radix_tree_exception(page)) {
Matthew Wilcox2cf938a2016-03-17 14:22:03 -07001543 if (radix_tree_deref_retry(page)) {
1544 slot = radix_tree_iter_retry(&iter);
1545 continue;
1546 }
Johannes Weiner0cd61442014-04-03 14:47:46 -07001547 /*
Ross Zwislerf9fe48b2016-01-22 15:10:40 -08001548 * A shadow entry of a recently evicted page, a swap
1549 * entry from shmem/tmpfs or a DAX entry. Return it
1550 * without attempting to raise page count.
Johannes Weiner0cd61442014-04-03 14:47:46 -07001551 */
1552 goto export;
1553 }
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001554
1555 head = compound_head(page);
1556 if (!page_cache_get_speculative(head))
Johannes Weiner0cd61442014-04-03 14:47:46 -07001557 goto repeat;
1558
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001559 /* The page was split under us? */
1560 if (compound_head(page) != head) {
1561 put_page(head);
1562 goto repeat;
1563 }
1564
Johannes Weiner0cd61442014-04-03 14:47:46 -07001565 /* Has the page moved? */
1566 if (unlikely(page != *slot)) {
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001567 put_page(head);
Johannes Weiner0cd61442014-04-03 14:47:46 -07001568 goto repeat;
1569 }
1570export:
1571 indices[ret] = iter.index;
1572 entries[ret] = page;
1573 if (++ret == nr_entries)
1574 break;
1575 }
1576 rcu_read_unlock();
1577 return ret;
1578}
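
/*
 * Illustrative sketch (hypothetical caller): consuming one batch of
 * entries the way truncate-style callers do, dropping the references
 * that were taken on real pages only.
 */
static void __maybe_unused example_scan_entries(struct address_space *mapping,
						pgoff_t start)
{
	struct page *entries[16];
	pgoff_t indices[16];
	unsigned int i, nr;

	nr = find_get_entries(mapping, start, 16, entries, indices);
	for (i = 0; i < nr; i++) {
		if (radix_tree_exceptional_entry(entries[i]))
			continue;	/* shadow/swap entry, nothing to put */
		put_page(entries[i]);
	}
}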
1579
1580/**
Jan Karab947cee2017-09-06 16:21:21 -07001581 * find_get_pages_range - gang pagecache lookup
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 * @mapping: The address_space to search
1583 * @start: The starting page index
Jan Karab947cee2017-09-06 16:21:21 -07001584 * @end: The final page index (inclusive)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 * @nr_pages: The maximum number of pages
1586 * @pages: Where the resulting pages are placed
1587 *
Jan Karab947cee2017-09-06 16:21:21 -07001588 * find_get_pages_range() will search for and return a group of up to @nr_pages
1589 * pages in the mapping starting at index @start and up to index @end
1590 * (inclusive). The pages are placed at @pages. find_get_pages_range() takes
1591 * a reference against the returned pages.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 *
1593 * The search returns a group of mapping-contiguous pages with ascending
1594 * indexes. There may be holes in the indices due to not-present pages.
Jan Karad72dc8a2017-09-06 16:21:18 -07001595 * We also update @start to index the next page for the traversal.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596 *
Jan Karab947cee2017-09-06 16:21:21 -07001597 * find_get_pages_range() returns the number of pages which were found. If this
1598 * number is smaller than @nr_pages, the end of specified range has been
1599 * reached.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 */
Jan Karab947cee2017-09-06 16:21:21 -07001601unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
1602 pgoff_t end, unsigned int nr_pages,
1603 struct page **pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604{
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001605 struct radix_tree_iter iter;
1606 void **slot;
1607 unsigned ret = 0;
1608
1609 if (unlikely(!nr_pages))
1610 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611
Nick Piggina60637c2008-07-25 19:45:31 -07001612 rcu_read_lock();
Jan Karad72dc8a2017-09-06 16:21:18 -07001613 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, *start) {
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001614 struct page *head, *page;
Jan Karab947cee2017-09-06 16:21:21 -07001615
1616 if (iter.index > end)
1617 break;
Nick Piggina60637c2008-07-25 19:45:31 -07001618repeat:
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001619 page = radix_tree_deref_slot(slot);
Nick Piggina60637c2008-07-25 19:45:31 -07001620 if (unlikely(!page))
1621 continue;
Hugh Dickins9d8aa4e2011-03-22 16:33:06 -07001622
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001623 if (radix_tree_exception(page)) {
Hugh Dickins8079b1c2011-08-03 16:21:28 -07001624 if (radix_tree_deref_retry(page)) {
Matthew Wilcox2cf938a2016-03-17 14:22:03 -07001625 slot = radix_tree_iter_retry(&iter);
1626 continue;
Hugh Dickins8079b1c2011-08-03 16:21:28 -07001627 }
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001628 /*
Johannes Weiner139b6a62014-05-06 12:50:05 -07001629 * A shadow entry of a recently evicted page,
1630 * or a swap entry from shmem/tmpfs. Skip
1631 * over it.
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001632 */
Hugh Dickins8079b1c2011-08-03 16:21:28 -07001633 continue;
Nick Piggin27d20fd2010-11-11 14:05:19 -08001634 }
Nick Piggina60637c2008-07-25 19:45:31 -07001635
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001636 head = compound_head(page);
1637 if (!page_cache_get_speculative(head))
Nick Piggina60637c2008-07-25 19:45:31 -07001638 goto repeat;
1639
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001640 /* The page was split under us? */
1641 if (compound_head(page) != head) {
1642 put_page(head);
1643 goto repeat;
1644 }
1645
Nick Piggina60637c2008-07-25 19:45:31 -07001646 /* Has the page moved? */
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001647 if (unlikely(page != *slot)) {
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001648 put_page(head);
Nick Piggina60637c2008-07-25 19:45:31 -07001649 goto repeat;
1650 }
1651
1652 pages[ret] = page;
Jan Karab947cee2017-09-06 16:21:21 -07001653 if (++ret == nr_pages) {
1654 *start = pages[ret - 1]->index + 1;
1655 goto out;
1656 }
Nick Piggina60637c2008-07-25 19:45:31 -07001657 }
Hugh Dickins5b280c02011-03-22 16:33:07 -07001658
Jan Karab947cee2017-09-06 16:21:21 -07001659 /*
 1660	 * We come here when there is no page beyond @end. We take care not to
 1661	 * overflow the index @start as it confuses some of the callers. This
 1662	 * breaks the iteration when there is a page at index -1 but that is
1663 * already broken anyway.
1664 */
1665 if (end == (pgoff_t)-1)
1666 *start = (pgoff_t)-1;
1667 else
1668 *start = end + 1;
1669out:
Nick Piggina60637c2008-07-25 19:45:31 -07001670 rcu_read_unlock();
Jan Karad72dc8a2017-09-06 16:21:18 -07001671
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 return ret;
1673}
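
/*
 * Illustrative sketch (hypothetical caller): because @start is advanced
 * by the callee, batched iteration over [start, end] is just a loop
 * that repeats until a batch comes back short.
 */
static void __maybe_unused example_walk_range(struct address_space *mapping,
					      pgoff_t start, pgoff_t end)
{
	struct page *pages[16];
	unsigned int i, nr;

	while ((nr = find_get_pages_range(mapping, &start, end,
					  16, pages)) != 0) {
		for (i = 0; i < nr; i++)
			put_page(pages[i]);
		if (nr < 16)
			break;		/* end of the range was reached */
	}
}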
1674
Jens Axboeebf43502006-04-27 08:46:01 +02001675/**
1676 * find_get_pages_contig - gang contiguous pagecache lookup
1677 * @mapping: The address_space to search
1678 * @index: The starting page index
1679 * @nr_pages: The maximum number of pages
1680 * @pages: Where the resulting pages are placed
1681 *
1682 * find_get_pages_contig() works exactly like find_get_pages(), except
1683 * that the returned number of pages are guaranteed to be contiguous.
1684 *
1685 * find_get_pages_contig() returns the number of pages which were found.
1686 */
1687unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
1688 unsigned int nr_pages, struct page **pages)
1689{
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001690 struct radix_tree_iter iter;
1691 void **slot;
1692 unsigned int ret = 0;
1693
1694 if (unlikely(!nr_pages))
1695 return 0;
Jens Axboeebf43502006-04-27 08:46:01 +02001696
Nick Piggina60637c2008-07-25 19:45:31 -07001697 rcu_read_lock();
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001698 radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) {
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001699 struct page *head, *page;
Nick Piggina60637c2008-07-25 19:45:31 -07001700repeat:
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001701 page = radix_tree_deref_slot(slot);
1702 /* The hole, there no reason to continue */
Nick Piggina60637c2008-07-25 19:45:31 -07001703 if (unlikely(!page))
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001704 break;
Hugh Dickins9d8aa4e2011-03-22 16:33:06 -07001705
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001706 if (radix_tree_exception(page)) {
Hugh Dickins8079b1c2011-08-03 16:21:28 -07001707 if (radix_tree_deref_retry(page)) {
Matthew Wilcox2cf938a2016-03-17 14:22:03 -07001708 slot = radix_tree_iter_retry(&iter);
1709 continue;
Hugh Dickins8079b1c2011-08-03 16:21:28 -07001710 }
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001711 /*
Johannes Weiner139b6a62014-05-06 12:50:05 -07001712 * A shadow entry of a recently evicted page,
1713 * or a swap entry from shmem/tmpfs. Stop
1714 * looking for contiguous pages.
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001715 */
Hugh Dickins8079b1c2011-08-03 16:21:28 -07001716 break;
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001717 }
Nick Piggina60637c2008-07-25 19:45:31 -07001718
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001719 head = compound_head(page);
1720 if (!page_cache_get_speculative(head))
Nick Piggina60637c2008-07-25 19:45:31 -07001721 goto repeat;
1722
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001723 /* The page was split under us? */
1724 if (compound_head(page) != head) {
1725 put_page(head);
1726 goto repeat;
1727 }
1728
Nick Piggina60637c2008-07-25 19:45:31 -07001729 /* Has the page moved? */
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001730 if (unlikely(page != *slot)) {
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001731 put_page(head);
Nick Piggina60637c2008-07-25 19:45:31 -07001732 goto repeat;
1733 }
1734
Nick Piggin9cbb4cb2011-01-13 15:45:51 -08001735 /*
 1736		 * Must check mapping and index after taking the ref;
 1737		 * otherwise we can get both false positives and false
1738 * negatives, which is just confusing to the caller.
1739 */
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001740 if (page->mapping == NULL || page_to_pgoff(page) != iter.index) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001741 put_page(page);
Nick Piggin9cbb4cb2011-01-13 15:45:51 -08001742 break;
1743 }
1744
Nick Piggina60637c2008-07-25 19:45:31 -07001745 pages[ret] = page;
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001746 if (++ret == nr_pages)
1747 break;
Jens Axboeebf43502006-04-27 08:46:01 +02001748 }
Nick Piggina60637c2008-07-25 19:45:31 -07001749 rcu_read_unlock();
1750 return ret;
Jens Axboeebf43502006-04-27 08:46:01 +02001751}
David Howellsef71c152007-05-09 02:33:44 -07001752EXPORT_SYMBOL(find_get_pages_contig);
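
/*
 * Illustrative sketch (hypothetical helper): since the result is
 * guaranteed contiguous, a caller can measure a single cached run
 * starting at @index.
 */
static __maybe_unused unsigned int
example_run_length(struct address_space *mapping, pgoff_t index)
{
	struct page *pages[16];
	unsigned int i, nr;

	nr = find_get_pages_contig(mapping, index, 16, pages);
	for (i = 0; i < nr; i++)
		put_page(pages[i]);
	return nr;	/* contiguous cached pages, capped at the batch size */
}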
Jens Axboeebf43502006-04-27 08:46:01 +02001753
Randy Dunlap485bb992006-06-23 02:03:49 -07001754/**
Jan Kara72b045a2017-11-15 17:34:33 -08001755 * find_get_pages_range_tag - find and return pages in given range matching @tag
Randy Dunlap485bb992006-06-23 02:03:49 -07001756 * @mapping: the address_space to search
1757 * @index: the starting page index
Jan Kara72b045a2017-11-15 17:34:33 -08001758 * @end: The final page index (inclusive)
Randy Dunlap485bb992006-06-23 02:03:49 -07001759 * @tag: the tag index
1760 * @nr_pages: the maximum number of pages
1761 * @pages: where the resulting pages are placed
1762 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 * Like find_get_pages, except we only return pages which are tagged with
Randy Dunlap485bb992006-06-23 02:03:49 -07001764 * @tag. We update @index to index the next page for the traversal.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765 */
Jan Kara72b045a2017-11-15 17:34:33 -08001766unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
1767 pgoff_t end, int tag, unsigned int nr_pages,
1768 struct page **pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769{
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001770 struct radix_tree_iter iter;
1771 void **slot;
1772 unsigned ret = 0;
1773
1774 if (unlikely(!nr_pages))
1775 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776
Nick Piggina60637c2008-07-25 19:45:31 -07001777 rcu_read_lock();
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001778 radix_tree_for_each_tagged(slot, &mapping->page_tree,
1779 &iter, *index, tag) {
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001780 struct page *head, *page;
Jan Kara72b045a2017-11-15 17:34:33 -08001781
1782 if (iter.index > end)
1783 break;
Nick Piggina60637c2008-07-25 19:45:31 -07001784repeat:
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001785 page = radix_tree_deref_slot(slot);
Nick Piggina60637c2008-07-25 19:45:31 -07001786 if (unlikely(!page))
1787 continue;
Hugh Dickins9d8aa4e2011-03-22 16:33:06 -07001788
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001789 if (radix_tree_exception(page)) {
Hugh Dickins8079b1c2011-08-03 16:21:28 -07001790 if (radix_tree_deref_retry(page)) {
Matthew Wilcox2cf938a2016-03-17 14:22:03 -07001791 slot = radix_tree_iter_retry(&iter);
1792 continue;
Hugh Dickins8079b1c2011-08-03 16:21:28 -07001793 }
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001794 /*
Johannes Weiner139b6a62014-05-06 12:50:05 -07001795 * A shadow entry of a recently evicted page.
1796 *
1797 * Those entries should never be tagged, but
1798 * this tree walk is lockless and the tags are
1799 * looked up in bulk, one radix tree node at a
1800 * time, so there is a sizable window for page
1801 * reclaim to evict a page we saw tagged.
1802 *
1803 * Skip over it.
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001804 */
Johannes Weiner139b6a62014-05-06 12:50:05 -07001805 continue;
Hugh Dickinsa2c16d62011-08-03 16:21:19 -07001806 }
Nick Piggina60637c2008-07-25 19:45:31 -07001807
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001808 head = compound_head(page);
1809 if (!page_cache_get_speculative(head))
Nick Piggina60637c2008-07-25 19:45:31 -07001810 goto repeat;
1811
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001812 /* The page was split under us? */
1813 if (compound_head(page) != head) {
1814 put_page(head);
1815 goto repeat;
1816 }
1817
Nick Piggina60637c2008-07-25 19:45:31 -07001818 /* Has the page moved? */
Konstantin Khlebnikov0fc9d102012-03-28 14:42:54 -07001819 if (unlikely(page != *slot)) {
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001820 put_page(head);
Nick Piggina60637c2008-07-25 19:45:31 -07001821 goto repeat;
1822 }
1823
1824 pages[ret] = page;
Jan Kara72b045a2017-11-15 17:34:33 -08001825 if (++ret == nr_pages) {
1826 *index = pages[ret - 1]->index + 1;
1827 goto out;
1828 }
Nick Piggina60637c2008-07-25 19:45:31 -07001829 }
Hugh Dickins5b280c02011-03-22 16:33:07 -07001830
Jan Kara72b045a2017-11-15 17:34:33 -08001831 /*
 1832	 * We come here when we reach @end. We take care not to overflow the
 1833	 * index @index as it confuses some of the callers. This breaks the
 1834	 * iteration when there is a page at index -1 but that is already broken
1835 * anyway.
1836 */
1837 if (end == (pgoff_t)-1)
1838 *index = (pgoff_t)-1;
1839 else
1840 *index = end + 1;
1841out:
Nick Piggina60637c2008-07-25 19:45:31 -07001842 rcu_read_unlock();
1843
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844 return ret;
1845}
Jan Kara72b045a2017-11-15 17:34:33 -08001846EXPORT_SYMBOL(find_get_pages_range_tag);
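
/*
 * Illustrative sketch (hypothetical caller): the classic writeback
 * pattern, walking pages tagged PAGECACHE_TAG_DIRTY in [index, end]
 * one batch at a time, with @index advanced by the callee.
 */
static void __maybe_unused example_walk_dirty(struct address_space *mapping,
					      pgoff_t index, pgoff_t end)
{
	struct page *pages[16];
	unsigned int i, nr;

	while ((nr = find_get_pages_range_tag(mapping, &index, end,
			PAGECACHE_TAG_DIRTY, 16, pages)) != 0) {
		for (i = 0; i < nr; i++) {
			/* ... lock the page and start writeback here ... */
			put_page(pages[i]);
		}
	}
}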
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847
Ross Zwisler7e7f7742016-01-22 15:10:44 -08001848/**
1849 * find_get_entries_tag - find and return entries that match @tag
1850 * @mapping: the address_space to search
1851 * @start: the starting page cache index
1852 * @tag: the tag index
1853 * @nr_entries: the maximum number of entries
1854 * @entries: where the resulting entries are placed
1855 * @indices: the cache indices corresponding to the entries in @entries
1856 *
1857 * Like find_get_entries, except we only return entries which are tagged with
1858 * @tag.
1859 */
1860unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
1861 int tag, unsigned int nr_entries,
1862 struct page **entries, pgoff_t *indices)
1863{
1864 void **slot;
1865 unsigned int ret = 0;
1866 struct radix_tree_iter iter;
1867
1868 if (!nr_entries)
1869 return 0;
1870
1871 rcu_read_lock();
Ross Zwisler7e7f7742016-01-22 15:10:44 -08001872 radix_tree_for_each_tagged(slot, &mapping->page_tree,
1873 &iter, start, tag) {
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001874 struct page *head, *page;
Ross Zwisler7e7f7742016-01-22 15:10:44 -08001875repeat:
1876 page = radix_tree_deref_slot(slot);
1877 if (unlikely(!page))
1878 continue;
1879 if (radix_tree_exception(page)) {
1880 if (radix_tree_deref_retry(page)) {
Matthew Wilcox2cf938a2016-03-17 14:22:03 -07001881 slot = radix_tree_iter_retry(&iter);
1882 continue;
Ross Zwisler7e7f7742016-01-22 15:10:44 -08001883 }
1884
1885 /*
1886 * A shadow entry of a recently evicted page, a swap
1887 * entry from shmem/tmpfs or a DAX entry. Return it
1888 * without attempting to raise page count.
1889 */
1890 goto export;
1891 }
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001892
1893 head = compound_head(page);
1894 if (!page_cache_get_speculative(head))
Ross Zwisler7e7f7742016-01-22 15:10:44 -08001895 goto repeat;
1896
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001897 /* The page was split under us? */
1898 if (compound_head(page) != head) {
1899 put_page(head);
1900 goto repeat;
1901 }
1902
Ross Zwisler7e7f7742016-01-22 15:10:44 -08001903 /* Has the page moved? */
1904 if (unlikely(page != *slot)) {
Kirill A. Shutemov83929372016-07-26 15:26:04 -07001905 put_page(head);
Ross Zwisler7e7f7742016-01-22 15:10:44 -08001906 goto repeat;
1907 }
1908export:
1909 indices[ret] = iter.index;
1910 entries[ret] = page;
1911 if (++ret == nr_entries)
1912 break;
1913 }
1914 rcu_read_unlock();
1915 return ret;
1916}
1917EXPORT_SYMBOL(find_get_entries_tag);
1918
Wu Fengguang76d42bd2006-06-25 05:48:43 -07001919/*
1920 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
1921 * a _large_ part of the i/o request. Imagine the worst scenario:
1922 *
1923 * ---R__________________________________________B__________
1924 * ^ reading here ^ bad block(assume 4k)
1925 *
1926 * read(R) => miss => readahead(R...B) => media error => frustrating retries
1927 * => failing the whole request => read(R) => read(R+1) =>
1928 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
1929 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
1930 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
1931 *
1932 * It is going insane. Fix it by quickly scaling down the readahead size.
1933 */
1934static void shrink_readahead_size_eio(struct file *filp,
1935 struct file_ra_state *ra)
1936{
Wu Fengguang76d42bd2006-06-25 05:48:43 -07001937 ra->ra_pages /= 4;
Wu Fengguang76d42bd2006-06-25 05:48:43 -07001938}
1939
Randy Dunlap485bb992006-06-23 02:03:49 -07001940/**
Christoph Hellwig47c27bc2017-08-29 16:13:18 +02001941 * generic_file_buffered_read - generic file read routine
1942 * @iocb: the iocb to read
Al Viro6e58e792014-02-03 17:07:03 -05001943 * @iter: data destination
1944 * @written: already copied
Randy Dunlap485bb992006-06-23 02:03:49 -07001945 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 * This is a generic file read routine, and uses the
Randy Dunlap485bb992006-06-23 02:03:49 -07001947 * mapping->a_ops->readpage() function for the actual low-level stuff.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 *
1949 * This is really ugly. But the goto's actually try to clarify some
1950 * of the logic when it comes to error handling etc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951 */
Christoph Hellwig47c27bc2017-08-29 16:13:18 +02001952static ssize_t generic_file_buffered_read(struct kiocb *iocb,
Al Viro6e58e792014-02-03 17:07:03 -05001953 struct iov_iter *iter, ssize_t written)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954{
Christoph Hellwig47c27bc2017-08-29 16:13:18 +02001955 struct file *filp = iocb->ki_filp;
Christoph Hellwig36e78912008-02-08 04:21:24 -08001956 struct address_space *mapping = filp->f_mapping;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 struct inode *inode = mapping->host;
Christoph Hellwig36e78912008-02-08 04:21:24 -08001958 struct file_ra_state *ra = &filp->f_ra;
Christoph Hellwig47c27bc2017-08-29 16:13:18 +02001959 loff_t *ppos = &iocb->ki_pos;
Fengguang Wu57f6b962007-10-16 01:24:37 -07001960 pgoff_t index;
1961 pgoff_t last_index;
1962 pgoff_t prev_index;
1963 unsigned long offset; /* offset into pagecache page */
Jan Karaec0f1632007-05-06 14:49:25 -07001964 unsigned int prev_offset;
Al Viro6e58e792014-02-03 17:07:03 -05001965 int error = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966
Wei Fangc2a97372016-10-07 17:01:52 -07001967 if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
Linus Torvaldsd05c5f72016-12-14 12:45:25 -08001968 return 0;
Wei Fangc2a97372016-10-07 17:01:52 -07001969 iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
1970
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001971 index = *ppos >> PAGE_SHIFT;
1972 prev_index = ra->prev_pos >> PAGE_SHIFT;
1973 prev_offset = ra->prev_pos & (PAGE_SIZE-1);
1974 last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
1975 offset = *ppos & ~PAGE_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 for (;;) {
1978 struct page *page;
Fengguang Wu57f6b962007-10-16 01:24:37 -07001979 pgoff_t end_index;
NeilBrowna32ea1e2007-07-17 04:03:04 -07001980 loff_t isize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 unsigned long nr, ret;
1982
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 cond_resched();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984find_page:
Michal Hocko5abf1862017-02-03 13:13:29 -08001985 if (fatal_signal_pending(current)) {
1986 error = -EINTR;
1987 goto out;
1988 }
1989
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 page = find_get_page(mapping, index);
Fengguang Wu3ea89ee2007-07-19 01:48:02 -07001991 if (!page) {
Milosz Tanski3239d832017-08-29 16:13:19 +02001992 if (iocb->ki_flags & IOCB_NOWAIT)
1993 goto would_block;
Rusty Russellcf914a72007-07-19 01:48:08 -07001994 page_cache_sync_readahead(mapping,
Fengguang Wu7ff81072007-10-16 01:24:35 -07001995 ra, filp,
Fengguang Wu3ea89ee2007-07-19 01:48:02 -07001996 index, last_index - index);
1997 page = find_get_page(mapping, index);
1998 if (unlikely(page == NULL))
1999 goto no_cached_page;
2000 }
2001 if (PageReadahead(page)) {
Rusty Russellcf914a72007-07-19 01:48:08 -07002002 page_cache_async_readahead(mapping,
Fengguang Wu7ff81072007-10-16 01:24:35 -07002003 ra, filp, page,
Fengguang Wu3ea89ee2007-07-19 01:48:02 -07002004 index, last_index - index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 }
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002006 if (!PageUptodate(page)) {
Milosz Tanski3239d832017-08-29 16:13:19 +02002007 if (iocb->ki_flags & IOCB_NOWAIT) {
2008 put_page(page);
2009 goto would_block;
2010 }
2011
Mel Gormanebded022016-03-15 14:55:39 -07002012 /*
2013 * See comment in do_read_cache_page on why
2014 * wait_on_page_locked is used to avoid unnecessarily
2015 * serialisations and why it's safe.
2016 */
Bart Van Asschec4b209a2016-10-07 16:58:33 -07002017 error = wait_on_page_locked_killable(page);
2018 if (unlikely(error))
2019 goto readpage_error;
Mel Gormanebded022016-03-15 14:55:39 -07002020 if (PageUptodate(page))
2021 goto page_ok;
2022
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002023 if (inode->i_blkbits == PAGE_SHIFT ||
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002024 !mapping->a_ops->is_partially_uptodate)
2025 goto page_not_up_to_date;
Eryu Guan6d6d36b2016-11-01 15:43:07 +08002026 /* pipes can't handle partially uptodate pages */
2027 if (unlikely(iter->type & ITER_PIPE))
2028 goto page_not_up_to_date;
Nick Piggin529ae9a2008-08-02 12:01:03 +02002029 if (!trylock_page(page))
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002030 goto page_not_up_to_date;
Dave Hansen8d056cb2010-11-11 14:05:15 -08002031 /* Did it get truncated before we got the lock? */
2032 if (!page->mapping)
2033 goto page_not_up_to_date_locked;
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002034 if (!mapping->a_ops->is_partially_uptodate(page,
Al Viro6e58e792014-02-03 17:07:03 -05002035 offset, iter->count))
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002036 goto page_not_up_to_date_locked;
2037 unlock_page(page);
2038 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039page_ok:
NeilBrowna32ea1e2007-07-17 04:03:04 -07002040 /*
2041 * i_size must be checked after we know the page is Uptodate.
2042 *
2043 * Checking i_size after the check allows us to calculate
2044 * the correct value for "nr", which means the zero-filled
2045 * part of the page is not copied back to userspace (unless
2046 * another truncate extends the file - this is desired though).
2047 */
2048
2049 isize = i_size_read(inode);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002050 end_index = (isize - 1) >> PAGE_SHIFT;
NeilBrowna32ea1e2007-07-17 04:03:04 -07002051 if (unlikely(!isize || index > end_index)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002052 put_page(page);
NeilBrowna32ea1e2007-07-17 04:03:04 -07002053 goto out;
2054 }
2055
2056 /* nr is the maximum number of bytes to copy from this page */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002057 nr = PAGE_SIZE;
NeilBrowna32ea1e2007-07-17 04:03:04 -07002058 if (index == end_index) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002059 nr = ((isize - 1) & ~PAGE_MASK) + 1;
NeilBrowna32ea1e2007-07-17 04:03:04 -07002060 if (nr <= offset) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002061 put_page(page);
NeilBrowna32ea1e2007-07-17 04:03:04 -07002062 goto out;
2063 }
2064 }
2065 nr = nr - offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066
2067 /* If users can be writing to this page using arbitrary
2068 * virtual addresses, take care about potential aliasing
2069 * before reading the page on the kernel side.
2070 */
2071 if (mapping_writably_mapped(mapping))
2072 flush_dcache_page(page);
2073
2074 /*
Jan Karaec0f1632007-05-06 14:49:25 -07002075 * When a sequential read accesses a page several times,
2076 * only mark it as accessed the first time.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 */
Jan Karaec0f1632007-05-06 14:49:25 -07002078 if (prev_index != index || offset != prev_offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 mark_page_accessed(page);
2080 prev_index = index;
2081
2082 /*
2083 * Ok, we have the page, and it's up-to-date, so
2084 * now we can copy it to user space...
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 */
Al Viro6e58e792014-02-03 17:07:03 -05002086
2087 ret = copy_page_to_iter(page, offset, nr, iter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 offset += ret;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002089 index += offset >> PAGE_SHIFT;
2090 offset &= ~PAGE_MASK;
Jan Kara6ce745e2007-05-06 14:49:26 -07002091 prev_offset = offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002093 put_page(page);
Al Viro6e58e792014-02-03 17:07:03 -05002094 written += ret;
2095 if (!iov_iter_count(iter))
2096 goto out;
2097 if (ret < nr) {
2098 error = -EFAULT;
2099 goto out;
2100 }
2101 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
2103page_not_up_to_date:
2104 /* Get exclusive access to the page ... */
Oleg Nesterov85462322008-06-08 21:20:43 +04002105 error = lock_page_killable(page);
2106 if (unlikely(error))
2107 goto readpage_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
Hisashi Hifumi8ab22b92008-07-28 15:46:36 -07002109page_not_up_to_date_locked:
Nick Pigginda6052f2006-09-25 23:31:35 -07002110 /* Did it get truncated before we got the lock? */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 if (!page->mapping) {
2112 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002113 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 continue;
2115 }
2116
2117 /* Did somebody else fill it already? */
2118 if (PageUptodate(page)) {
2119 unlock_page(page);
2120 goto page_ok;
2121 }
2122
2123readpage:
Jeff Moyer91803b42010-05-26 11:49:40 -04002124 /*
2125 * A previous I/O error may have been due to temporary
2126	 * failures, e.g. multipath errors.
2127 * PG_error will be set again if readpage fails.
2128 */
2129 ClearPageError(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 /* Start the actual read. The read will unlock the page. */
2131 error = mapping->a_ops->readpage(filp, page);
2132
Zach Brown994fc28c2005-12-15 14:28:17 -08002133 if (unlikely(error)) {
2134 if (error == AOP_TRUNCATED_PAGE) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002135 put_page(page);
Al Viro6e58e792014-02-03 17:07:03 -05002136 error = 0;
Zach Brown994fc28c2005-12-15 14:28:17 -08002137 goto find_page;
2138 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 goto readpage_error;
Zach Brown994fc28c2005-12-15 14:28:17 -08002140 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141
2142 if (!PageUptodate(page)) {
Oleg Nesterov85462322008-06-08 21:20:43 +04002143 error = lock_page_killable(page);
2144 if (unlikely(error))
2145 goto readpage_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 if (!PageUptodate(page)) {
2147 if (page->mapping == NULL) {
2148 /*
Christoph Hellwig2ecdc822010-01-26 17:27:20 +01002149 * invalidate_mapping_pages got it
Linus Torvalds1da177e2005-04-16 15:20:36 -07002150 */
2151 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002152 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 goto find_page;
2154 }
2155 unlock_page(page);
Fengguang Wu7ff81072007-10-16 01:24:35 -07002156 shrink_readahead_size_eio(filp, ra);
Oleg Nesterov85462322008-06-08 21:20:43 +04002157 error = -EIO;
2158 goto readpage_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 }
2160 unlock_page(page);
2161 }
2162
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 goto page_ok;
2164
2165readpage_error:
2166 /* UHHUH! A synchronous read error occurred. Report it */
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002167 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 goto out;
2169
2170no_cached_page:
2171 /*
2172 * Ok, it wasn't cached, so we need to create a new
2173	 * page.
2174 */
Nick Piggineb2be182007-10-16 01:24:57 -07002175 page = page_cache_alloc_cold(mapping);
2176 if (!page) {
Al Viro6e58e792014-02-03 17:07:03 -05002177 error = -ENOMEM;
Nick Piggineb2be182007-10-16 01:24:57 -07002178 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 }
Michal Hocko6afdb852015-06-24 16:58:06 -07002180 error = add_to_page_cache_lru(page, mapping, index,
Michal Hockoc62d2552015-11-06 16:28:49 -08002181 mapping_gfp_constraint(mapping, GFP_KERNEL));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 if (error) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002183 put_page(page);
Al Viro6e58e792014-02-03 17:07:03 -05002184 if (error == -EEXIST) {
2185 error = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 goto find_page;
Al Viro6e58e792014-02-03 17:07:03 -05002187 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 goto out;
2189 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 goto readpage;
2191 }
2192
Milosz Tanski3239d832017-08-29 16:13:19 +02002193would_block:
2194 error = -EAGAIN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195out:
Fengguang Wu7ff81072007-10-16 01:24:35 -07002196 ra->prev_pos = prev_index;
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002197 ra->prev_pos <<= PAGE_SHIFT;
Fengguang Wu7ff81072007-10-16 01:24:35 -07002198 ra->prev_pos |= prev_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002200 *ppos = ((loff_t)index << PAGE_SHIFT) + offset;
Krishna Kumar0c6aa262008-10-15 22:01:13 -07002201 file_accessed(filp);
Al Viro6e58e792014-02-03 17:07:03 -05002202 return written ? written : error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203}
2204
Randy Dunlap485bb992006-06-23 02:03:49 -07002205/**
Al Viro6abd2322014-04-04 14:20:57 -04002206 * generic_file_read_iter - generic filesystem read routine
Randy Dunlap485bb992006-06-23 02:03:49 -07002207 * @iocb: kernel I/O control block
Al Viro6abd2322014-04-04 14:20:57 -04002208 * @iter: destination for the data read
Randy Dunlap485bb992006-06-23 02:03:49 -07002209 *
Al Viro6abd2322014-04-04 14:20:57 -04002210 * This is the "read_iter()" routine for all filesystems
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 * that can use the page cache directly.
2212 */
2213ssize_t
Al Viroed978a82014-03-05 22:53:04 -05002214generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215{
Nicolai Stangee7080a42016-03-25 14:22:14 -07002216 size_t count = iov_iter_count(iter);
Christoph Hellwig47c27bc2017-08-29 16:13:18 +02002217 ssize_t retval = 0;
Nicolai Stangee7080a42016-03-25 14:22:14 -07002218
2219 if (!count)
2220 goto out; /* skip atime */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221
Al Viro2ba48ce2015-04-09 13:52:01 -04002222 if (iocb->ki_flags & IOCB_DIRECT) {
Christoph Hellwig47c27bc2017-08-29 16:13:18 +02002223 struct file *file = iocb->ki_filp;
Al Viroed978a82014-03-05 22:53:04 -05002224 struct address_space *mapping = file->f_mapping;
2225 struct inode *inode = mapping->host;
Badari Pulavarty543ade12006-09-30 23:28:48 -07002226 loff_t size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228 size = i_size_read(inode);
Goldwyn Rodrigues6be96d32017-06-20 07:05:44 -05002229 if (iocb->ki_flags & IOCB_NOWAIT) {
2230 if (filemap_range_has_page(mapping, iocb->ki_pos,
2231 iocb->ki_pos + count - 1))
2232 return -EAGAIN;
2233 } else {
2234 retval = filemap_write_and_wait_range(mapping,
2235 iocb->ki_pos,
2236 iocb->ki_pos + count - 1);
2237 if (retval < 0)
2238 goto out;
2239 }
Al Viroed978a82014-03-05 22:53:04 -05002240
Christoph Hellwig0d5b0cf2016-10-03 09:48:08 +11002241 file_accessed(file);
2242
Al Viro5ecda132017-04-13 14:13:36 -04002243 retval = mapping->a_ops->direct_IO(iocb, iter);
Al Viroc3a69022016-10-10 13:26:27 -04002244 if (retval >= 0) {
Christoph Hellwigc64fb5c2016-04-07 08:51:55 -07002245 iocb->ki_pos += retval;
Al Viro5ecda132017-04-13 14:13:36 -04002246 count -= retval;
Steven Whitehouse9fe55ee2014-01-24 14:42:22 +00002247 }
Al Viro5b47d592017-05-08 13:54:47 -04002248 iov_iter_revert(iter, count - iov_iter_count(iter));
Josef Bacik66f998f2010-05-23 11:00:54 -04002249
Steven Whitehouse9fe55ee2014-01-24 14:42:22 +00002250 /*
2251 * Btrfs can have a short DIO read if we encounter
2252 * compressed extents, so if there was an error, or if
2253 * we've already read everything we wanted to, or if
2254 * there was a short read because we hit EOF, go ahead
2255 * and return. Otherwise fallthrough to buffered io for
Matthew Wilcoxfbbbad42015-02-16 15:58:53 -08002256 * the rest of the read. Buffered reads will not work for
2257 * DAX files, so don't bother trying.
Steven Whitehouse9fe55ee2014-01-24 14:42:22 +00002258 */
Al Viro5ecda132017-04-13 14:13:36 -04002259 if (retval < 0 || !count || iocb->ki_pos >= size ||
Christoph Hellwig0d5b0cf2016-10-03 09:48:08 +11002260 IS_DAX(inode))
Steven Whitehouse9fe55ee2014-01-24 14:42:22 +00002261 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 }
2263
Christoph Hellwig47c27bc2017-08-29 16:13:18 +02002264 retval = generic_file_buffered_read(iocb, iter, retval);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265out:
2266 return retval;
2267}
Al Viroed978a82014-03-05 22:53:04 -05002268EXPORT_SYMBOL(generic_file_read_iter);
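
/*
 * Illustrative sketch (hypothetical filesystem): most filesystems that
 * use the page cache wire this routine straight into their
 * file_operations; the neighbouring entries are the usual generic
 * helpers.
 */
static const struct file_operations example_fops __maybe_unused = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_mmap,
};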
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270#ifdef CONFIG_MMU
Randy Dunlap485bb992006-06-23 02:03:49 -07002271/**
2272 * page_cache_read - adds requested page to the page cache if not already there
2273 * @file: file to read
2274 * @offset: page index
Randy Dunlap62eb3202016-02-11 16:12:58 -08002275 * @gfp_mask: memory allocation flags
Randy Dunlap485bb992006-06-23 02:03:49 -07002276 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 * This adds the requested page to the page cache if it isn't already there,
2278 * and schedules an I/O to read in its contents from disk.
2279 */
Michal Hockoc20cd452016-01-14 15:20:12 -08002280static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281{
2282 struct address_space *mapping = file->f_mapping;
Paul McQuade99dadfd2014-10-09 15:29:03 -07002283 struct page *page;
Zach Brown994fc28c2005-12-15 14:28:17 -08002284 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285
Zach Brown994fc28c2005-12-15 14:28:17 -08002286 do {
Michal Hockoc20cd452016-01-14 15:20:12 -08002287 page = __page_cache_alloc(gfp_mask|__GFP_COLD);
Zach Brown994fc28c2005-12-15 14:28:17 -08002288 if (!page)
2289 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290
Michal Hockoc20cd452016-01-14 15:20:12 -08002291 ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
Zach Brown994fc28c2005-12-15 14:28:17 -08002292 if (ret == 0)
2293 ret = mapping->a_ops->readpage(file, page);
2294 else if (ret == -EEXIST)
2295 ret = 0; /* losing race to add is OK */
2296
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002297 put_page(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298
Zach Brown994fc28c2005-12-15 14:28:17 -08002299 } while (ret == AOP_TRUNCATED_PAGE);
Paul McQuade99dadfd2014-10-09 15:29:03 -07002300
Zach Brown994fc28c2005-12-15 14:28:17 -08002301 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302}
2303
2304#define MMAP_LOTSAMISS (100)
2305
Linus Torvaldsef00e082009-06-16 15:31:25 -07002306/*
2307 * Synchronous readahead happens when we don't even find
2308 * a page in the page cache at all.
2309 */
2310static void do_sync_mmap_readahead(struct vm_area_struct *vma,
2311 struct file_ra_state *ra,
2312 struct file *file,
2313 pgoff_t offset)
2314{
Linus Torvaldsef00e082009-06-16 15:31:25 -07002315 struct address_space *mapping = file->f_mapping;
2316
2317 /* If we don't want any read-ahead, don't bother */
Joe Perches64363aa2013-07-08 16:00:18 -07002318 if (vma->vm_flags & VM_RAND_READ)
Linus Torvaldsef00e082009-06-16 15:31:25 -07002319 return;
Wu Fengguang275b12b2011-05-24 17:12:28 -07002320 if (!ra->ra_pages)
2321 return;
Linus Torvaldsef00e082009-06-16 15:31:25 -07002322
Joe Perches64363aa2013-07-08 16:00:18 -07002323 if (vma->vm_flags & VM_SEQ_READ) {
Wu Fengguang7ffc59b2009-06-16 15:31:38 -07002324 page_cache_sync_readahead(mapping, ra, file, offset,
2325 ra->ra_pages);
Linus Torvaldsef00e082009-06-16 15:31:25 -07002326 return;
2327 }
2328
Andi Kleen207d04b2011-05-24 17:12:29 -07002329 /* Avoid banging the cache line if not needed */
2330 if (ra->mmap_miss < MMAP_LOTSAMISS * 10)
Linus Torvaldsef00e082009-06-16 15:31:25 -07002331 ra->mmap_miss++;
2332
2333 /*
2334 * Do we miss much more than hit in this file? If so,
2335 * stop bothering with read-ahead. It will only hurt.
2336 */
2337 if (ra->mmap_miss > MMAP_LOTSAMISS)
2338 return;
2339
Wu Fengguangd30a1102009-06-16 15:31:30 -07002340 /*
2341 * mmap read-around
2342 */
Roman Gushchin600e19a2015-11-05 18:47:08 -08002343 ra->start = max_t(long, 0, offset - ra->ra_pages / 2);
2344 ra->size = ra->ra_pages;
2345 ra->async_size = ra->ra_pages / 4;
Wu Fengguang275b12b2011-05-24 17:12:28 -07002346 ra_submit(ra, mapping, file);
Linus Torvaldsef00e082009-06-16 15:31:25 -07002347}
2348
2349/*
2350 * Asynchronous readahead happens when we find the page and PG_readahead,
2351 * so we want to possibly extend the readahead further.
2352 */
2353static void do_async_mmap_readahead(struct vm_area_struct *vma,
2354 struct file_ra_state *ra,
2355 struct file *file,
2356 struct page *page,
2357 pgoff_t offset)
2358{
2359 struct address_space *mapping = file->f_mapping;
2360
2361 /* If we don't want any read-ahead, don't bother */
Joe Perches64363aa2013-07-08 16:00:18 -07002362 if (vma->vm_flags & VM_RAND_READ)
Linus Torvaldsef00e082009-06-16 15:31:25 -07002363 return;
2364 if (ra->mmap_miss > 0)
2365 ra->mmap_miss--;
2366 if (PageReadahead(page))
Wu Fengguang2fad6f52009-06-16 15:31:29 -07002367 page_cache_async_readahead(mapping, ra, file,
2368 page, offset, ra->ra_pages);
Linus Torvaldsef00e082009-06-16 15:31:25 -07002369}
2370
Randy Dunlap485bb992006-06-23 02:03:49 -07002371/**
Nick Piggin54cb8822007-07-19 01:46:59 -07002372 * filemap_fault - read in file data for page fault handling
Nick Piggind0217ac2007-07-19 01:47:03 -07002373 * @vmf: struct vm_fault containing details of the fault
Randy Dunlap485bb992006-06-23 02:03:49 -07002374 *
Nick Piggin54cb8822007-07-19 01:46:59 -07002375 * filemap_fault() is invoked via the vma operations vector for a
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376 * mapped memory region to read in file data during a page fault.
2377 *
2378 * The goto's are kind of ugly, but this streamlines the normal case of having
2379 * it in the page cache, and handles the special cases reasonably without
2380 * having a lot of duplicated code.
Paul Cassella9a95f3c2014-08-06 16:07:24 -07002381 *
2382 * vma->vm_mm->mmap_sem must be held on entry.
2383 *
2384 * If our return value has VM_FAULT_RETRY set, it's because
2385 * lock_page_or_retry() returned 0.
2386 * The mmap_sem has usually been released in this case.
2387 * See __lock_page_or_retry() for the exception.
2388 *
2389 * If our return value does not have VM_FAULT_RETRY set, the mmap_sem
2390 * has not been released.
2391 *
2392 * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393 */
Dave Jiang11bac802017-02-24 14:56:41 -08002394int filemap_fault(struct vm_fault *vmf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395{
2396 int error;
Dave Jiang11bac802017-02-24 14:56:41 -08002397 struct file *file = vmf->vma->vm_file;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398 struct address_space *mapping = file->f_mapping;
2399 struct file_ra_state *ra = &file->f_ra;
2400 struct inode *inode = mapping->host;
Linus Torvaldsef00e082009-06-16 15:31:25 -07002401 pgoff_t offset = vmf->pgoff;
Matthew Wilcox9ab25942017-05-03 14:53:29 -07002402 pgoff_t max_off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 struct page *page;
Nick Piggin83c54072007-07-19 01:47:05 -07002404 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405
Matthew Wilcox9ab25942017-05-03 14:53:29 -07002406 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2407 if (unlikely(offset >= max_off))
Linus Torvalds5307cc12007-10-31 09:19:46 -07002408 return VM_FAULT_SIGBUS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 /*
Johannes Weiner49426422013-10-16 13:46:59 -07002411 * Do we have something in the page cache already?
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 */
Linus Torvaldsef00e082009-06-16 15:31:25 -07002413 page = find_get_page(mapping, offset);
Shaohua Li45cac652012-10-08 16:32:19 -07002414 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 /*
Linus Torvaldsef00e082009-06-16 15:31:25 -07002416 * We found the page, so try async readahead before
2417 * waiting for the lock.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418 */
Dave Jiang11bac802017-02-24 14:56:41 -08002419 do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
Shaohua Li45cac652012-10-08 16:32:19 -07002420 } else if (!page) {
Linus Torvaldsef00e082009-06-16 15:31:25 -07002421 /* No page in the page cache at all */
Dave Jiang11bac802017-02-24 14:56:41 -08002422 do_sync_mmap_readahead(vmf->vma, ra, file, offset);
Linus Torvaldsef00e082009-06-16 15:31:25 -07002423 count_vm_event(PGMAJFAULT);
Roman Gushchin22621852017-07-06 15:40:25 -07002424 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
Linus Torvaldsef00e082009-06-16 15:31:25 -07002425 ret = VM_FAULT_MAJOR;
2426retry_find:
Michel Lespinasseb522c942010-10-26 14:21:56 -07002427 page = find_get_page(mapping, offset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428 if (!page)
2429 goto no_cached_page;
2430 }
2431
Dave Jiang11bac802017-02-24 14:56:41 -08002432 if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) {
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002433 put_page(page);
Michel Lespinassed065bd82010-10-26 14:21:57 -07002434 return ret | VM_FAULT_RETRY;
Michel Lespinassed88c0922010-11-02 13:05:18 -07002435 }
Michel Lespinasseb522c942010-10-26 14:21:56 -07002436
2437 /* Did it get truncated? */
2438 if (unlikely(page->mapping != mapping)) {
2439 unlock_page(page);
2440 put_page(page);
2441 goto retry_find;
2442 }
Sasha Levin309381fea2014-01-23 15:52:54 -08002443 VM_BUG_ON_PAGE(page->index != offset, page);
Michel Lespinasseb522c942010-10-26 14:21:56 -07002444
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 /*
Nick Piggind00806b2007-07-19 01:46:57 -07002446 * We have a locked page in the page cache, now we need to check
2447 * that it's up-to-date. If not, it is going to be due to an error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448 */
Nick Piggind00806b2007-07-19 01:46:57 -07002449 if (unlikely(!PageUptodate(page)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 goto page_not_uptodate;
2451
Linus Torvaldsef00e082009-06-16 15:31:25 -07002452 /*
2453 * Found the page and have a reference on it.
2454 * We must recheck i_size under page lock.
2455 */
Matthew Wilcox9ab25942017-05-03 14:53:29 -07002456 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2457 if (unlikely(offset >= max_off)) {
Nick Piggind00806b2007-07-19 01:46:57 -07002458 unlock_page(page);
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002459 put_page(page);
Linus Torvalds5307cc12007-10-31 09:19:46 -07002460 return VM_FAULT_SIGBUS;
Nick Piggind00806b2007-07-19 01:46:57 -07002461 }
2462
Nick Piggind0217ac2007-07-19 01:47:03 -07002463 vmf->page = page;
Nick Piggin83c54072007-07-19 01:47:05 -07002464 return ret | VM_FAULT_LOCKED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466no_cached_page:
2467 /*
2468 * We're only likely to ever get here if MADV_RANDOM is in
2469 * effect.
2470 */
Michal Hockoc20cd452016-01-14 15:20:12 -08002471 error = page_cache_read(file, offset, vmf->gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472
2473 /*
2474 * The page we want has now been added to the page cache.
2475 * In the unlikely event that someone removed it in the
2476 * meantime, we'll just come back here and read it again.
2477 */
2478 if (error >= 0)
2479 goto retry_find;
2480
2481 /*
2482 * An error return from page_cache_read can result if the
2483 * system is low on memory, or a problem occurs while trying
2484 * to schedule I/O.
2485 */
2486 if (error == -ENOMEM)
Nick Piggind0217ac2007-07-19 01:47:03 -07002487 return VM_FAULT_OOM;
2488 return VM_FAULT_SIGBUS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489
2490page_not_uptodate:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491 /*
2492 * Umm, take care of errors if the page isn't up-to-date.
2493 * Try to re-read it _once_. We do this synchronously,
2494 * because there really aren't any performance issues here
2495 * and we need to check for errors.
2496 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 ClearPageError(page);
Zach Brown994fc28c2005-12-15 14:28:17 -08002498 error = mapping->a_ops->readpage(file, page);
Miklos Szeredi3ef0f722008-05-14 16:05:37 -07002499 if (!error) {
2500 wait_on_page_locked(page);
2501 if (!PageUptodate(page))
2502 error = -EIO;
2503 }
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03002504 put_page(page);
Nick Piggind00806b2007-07-19 01:46:57 -07002505
2506 if (!error || error == AOP_TRUNCATED_PAGE)
2507 goto retry_find;
2508
2509 /* Things didn't work out. Return zero to tell the mm layer so. */
2510 shrink_readahead_size_eio(file, ra);
Nick Piggind0217ac2007-07-19 01:47:03 -07002511 return VM_FAULT_SIGBUS;
Nick Piggin54cb8822007-07-19 01:46:59 -07002512}
2513EXPORT_SYMBOL(filemap_fault);

void filemap_map_pages(struct vm_fault *vmf,
		pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	struct radix_tree_iter iter;
	void **slot;
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	pgoff_t last_pgoff = start_pgoff;
	unsigned long max_idx;
	struct page *head, *page;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
			start_pgoff) {
		if (iter.index > end_pgoff)
			break;
repeat:
		page = radix_tree_deref_slot(slot);
		if (unlikely(!page))
			goto next;
		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			goto next;
		}

		head = compound_head(page);
		if (!page_cache_get_speculative(head))
			goto repeat;

		/* The page was split under us? */
		if (compound_head(page) != head) {
			put_page(head);
			goto repeat;
		}

		/* Has the page moved? */
		if (unlikely(page != *slot)) {
			put_page(head);
			goto repeat;
		}

		if (!PageUptodate(page) ||
				PageReadahead(page) ||
				PageHWPoison(page))
			goto skip;
		if (!trylock_page(page))
			goto skip;

		if (page->mapping != mapping || !PageUptodate(page))
			goto unlock;

		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
		if (page->index >= max_idx)
			goto unlock;

		if (file->f_ra.mmap_miss > 0)
			file->f_ra.mmap_miss--;

		vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT;
		if (vmf->pte)
			vmf->pte += iter.index - last_pgoff;
		last_pgoff = iter.index;
		if (alloc_set_pte(vmf, NULL, page))
			goto unlock;
		unlock_page(page);
		goto next;
unlock:
		unlock_page(page);
skip:
		put_page(page);
next:
		/* Huge page is mapped? No need to proceed. */
		if (pmd_trans_huge(*vmf->pmd))
			break;
		if (iter.index == end_pgoff)
			break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(filemap_map_pages);

int filemap_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	int ret = VM_FAULT_LOCKED;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vmf->vma->vm_file);
	lock_page(page);
	if (page->mapping != inode->i_mapping) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the page dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty page and writeprotect it again.
	 */
	set_page_dirty(page);
	wait_for_stable_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(filemap_page_mkwrite);

const struct vm_operations_struct generic_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);
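
/*
 * Illustrative sketch (not part of the original file): a read-only
 * filesystem that provides ->readpage but no ->writepage can wire the
 * helpers above directly into its file_operations; the "myfs_*" name is
 * hypothetical.
 */
static const struct file_operations myfs_ro_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,	/* rejects writable shared mmaps */
};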

static struct page *wait_on_page_read(struct page *page)
{
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			put_page(page);
			page = ERR_PTR(-EIO);
		}
	}
	return page;
}

static struct page *do_read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data,
				gfp_t gfp)
{
	struct page *page;
	int err;
repeat:
	page = find_get_page(mapping, index);
	if (!page) {
		page = __page_cache_alloc(gfp | __GFP_COLD);
		if (!page)
			return ERR_PTR(-ENOMEM);
		err = add_to_page_cache_lru(page, mapping, index, gfp);
		if (unlikely(err)) {
			put_page(page);
			if (err == -EEXIST)
				goto repeat;
			/* Presumably ENOMEM for radix tree node */
			return ERR_PTR(err);
		}

filler:
		err = filler(data, page);
		if (err < 0) {
			put_page(page);
			return ERR_PTR(err);
		}

		page = wait_on_page_read(page);
		if (IS_ERR(page))
			return page;
		goto out;
	}
	if (PageUptodate(page))
		goto out;

	/*
	 * Page is not up to date and may be locked due to one of the
	 * following cases:
	 * case a: Page is being filled and the page lock is held
	 * case b: Read/write error clearing the page uptodate status
	 * case c: Truncation in progress (page locked)
	 * case d: Reclaim in progress
	 *
	 * Case a, the page will be up to date when the page is unlocked.
	 * There is no need to serialise on the page lock here as the page
	 * is pinned so the lock gives no additional protection. Even if
	 * the page is truncated, the data is still valid if PageUptodate,
	 * as it's a read vs truncate race.
	 * Case b, the page will not be up to date.
	 * Case c, the page may be truncated but in itself, the data may still
	 * be valid after IO completes as it's a read vs truncate race. The
	 * operation must restart if the page is not uptodate on unlock but
	 * otherwise serialising on the page lock to stabilise the mapping gives
	 * no additional guarantees to the caller as the page lock is
	 * released before return.
	 * Case d, similar to truncation. If reclaim holds the page lock, it
	 * will be a race with remove_mapping that determines if the mapping
	 * is valid on unlock but otherwise the data is valid and there is
	 * no need to serialise with the page lock.
	 *
	 * As the page lock gives no additional guarantee, we optimistically
	 * wait on the page to be unlocked and check if it's up to date and
	 * use the page if it is. Otherwise, the page lock is required to
	 * distinguish between the different cases. The motivation is that we
	 * avoid spurious serialisations and wakeups when multiple processes
	 * wait on the same page for IO to complete.
	 */
	wait_on_page_locked(page);
	if (PageUptodate(page))
		goto out;

	/* Distinguish between all the cases under the safety of the lock */
	lock_page(page);

	/* Case c or d, restart the operation */
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		goto repeat;
	}

	/* Someone else locked and filled the page in a very small window */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto out;
	}
	goto filler;

out:
	mark_page_accessed(page);
	return page;
}

/**
 * read_cache_page - read into page cache, fill it if needed
 * @mapping:	the page's address_space
 * @index:	the page index
 * @filler:	function to perform the read
 * @data:	first arg to filler(data, page) function, often left as NULL
 *
 * Read into the page cache. If a page already exists, and PageUptodate() is
 * not set, try to fill the page and wait for it to become unlocked.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index,
				int (*filler)(void *, struct page *),
				void *data)
{
	return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
}
EXPORT_SYMBOL(read_cache_page);

/**
 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * @index:	the page index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * any new page allocations done using the specified allocation flags.
 *
 * If the page does not get brought uptodate, return -EIO.
 */
struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index,
				gfp_t gfp)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;

	return do_read_cache_page(mapping, index, filler, NULL, gfp);
}
EXPORT_SYMBOL(read_cache_page_gfp);
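
/*
 * Illustrative sketch (not part of the original file): a filesystem can use
 * read_cache_page() with a private filler to cache metadata pages. The
 * filler has ->readpage semantics: fill the page, mark it uptodate on
 * success, and unlock it in all cases. The "myfs_*" names, including the
 * myfs_read_meta_block() helper, are hypothetical.
 */
static int myfs_meta_filler(void *data, struct page *page)
{
	struct inode *inode = data;
	int err = myfs_read_meta_block(inode, page);	/* hypothetical I/O helper */

	if (!err)
		SetPageUptodate(page);
	unlock_page(page);
	return err;
}

static struct page *myfs_get_meta_page(struct inode *inode, pgoff_t index)
{
	return read_cache_page(inode->i_mapping, index,
			       myfs_meta_filler, inode);
}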

/*
 * Performs necessary checks before doing a write.
 *
 * Can adjust the writing position or the number of bytes to write.
 * Returns a negative error code that the caller should return, or the
 * number of bytes that may be written (zero when there is nothing to
 * write).
 */
inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	unsigned long limit = rlimit(RLIMIT_FSIZE);
	loff_t pos;

	if (!iov_iter_count(from))
		return 0;

	/* FIXME: this is for backwards compatibility with 2.4 */
	if (iocb->ki_flags & IOCB_APPEND)
		iocb->ki_pos = i_size_read(inode);

	pos = iocb->ki_pos;

	if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
		return -EINVAL;

	if (limit != RLIM_INFINITY) {
		if (iocb->ki_pos >= limit) {
			send_sig(SIGXFSZ, current, 0);
			return -EFBIG;
		}
		iov_iter_truncate(from, limit - (unsigned long)pos);
	}

	/*
	 * LFS rule
	 */
	if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
				!(file->f_flags & O_LARGEFILE))) {
		if (pos >= MAX_NON_LFS)
			return -EFBIG;
		iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
	}

	/*
	 * Are we about to exceed the fs block limit?
	 *
	 * If we have written data it becomes a short write. If we have
	 * exceeded without writing data we send a signal and return EFBIG.
	 * Linus' frestrict idea will clean these up nicely.
	 */
	if (unlikely(pos >= inode->i_sb->s_maxbytes))
		return -EFBIG;

	iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
	return iov_iter_count(from);
}
EXPORT_SYMBOL(generic_write_checks);

int pagecache_write_begin(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned flags,
				struct page **pagep, void **fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_begin(file, mapping, pos, len, flags,
							pagep, fsdata);
}
EXPORT_SYMBOL(pagecache_write_begin);

int pagecache_write_end(struct file *file, struct address_space *mapping,
				loff_t pos, unsigned len, unsigned copied,
				struct page *page, void *fsdata)
{
	const struct address_space_operations *aops = mapping->a_ops;

	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
}
EXPORT_SYMBOL(pagecache_write_end);

ssize_t
generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos;
	ssize_t written;
	size_t write_len;
	pgoff_t end;

	write_len = iov_iter_count(from);
	end = (pos + write_len - 1) >> PAGE_SHIFT;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* If there are cached pages over the range, return -EAGAIN
		   rather than block on writeback or invalidation */
		if (filemap_range_has_page(inode->i_mapping, pos,
					   pos + iov_iter_count(from)))
			return -EAGAIN;
	} else {
		written = filemap_write_and_wait_range(mapping, pos,
							pos + write_len - 1);
		if (written)
			goto out;
	}

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data. We invalidate clean cached pages from the region we're
	 * about to write. We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	written = invalidate_inode_pages2_range(mapping,
					pos >> PAGE_SHIFT, end);
	/*
	 * If a page cannot be invalidated, return 0 to fall back
	 * to buffered write.
	 */
	if (written) {
		if (written == -EBUSY)
			return 0;
		goto out;
	}

	written = mapping->a_ops->direct_IO(iocb, from);

	/*
	 * Finally, try again to invalidate clean pages which might have been
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * if the source of the write was an mmap'ed region of the file
	 * we're writing. Either one is a pretty crazy thing to do,
	 * so we don't support it 100%. If this invalidation
	 * fails, tough, the write still worked...
	 *
	 * Most of the time we do not need this since dio_complete() will do
	 * the invalidation for us. However there are some file systems that
	 * do not end up with dio_complete() being called, so let's not break
	 * them by removing it completely.
	 */
	if (mapping->nrpages)
		invalidate_inode_pages2_range(mapping,
					pos >> PAGE_SHIFT, end);

	if (written > 0) {
		pos += written;
		write_len -= written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
			i_size_write(inode, pos);
			mark_inode_dirty(inode);
		}
		iocb->ki_pos = pos;
	}
	iov_iter_revert(from, write_len - iov_iter_count(from));
out:
	return written;
}
EXPORT_SYMBOL(generic_file_direct_write);
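
/*
 * Illustrative sketch (not part of the original file): this path is entered
 * from userspace via O_DIRECT, which on most filesystems requires the
 * buffer, offset and length to be logical-block-size aligned (4096 below
 * is a common choice; "data.bin" is a placeholder file name):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("data.bin", O_WRONLY | O_CREAT | O_DIRECT, 0644);
 *
 *		if (fd < 0 || posix_memalign(&buf, 4096, 4096))
 *			return 1;
 *		memset(buf, 'x', 4096);
 *		if (pwrite(fd, buf, 4096, 0) != 4096)
 *			return 1;	(clean cached pages over the range
 *					 are invalidated around the write)
 *		free(buf);
 *		close(fd);
 *		return 0;
 *	}
 */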

/*
 * Find or create a page at the given pagecache position. Return the locked
 * page. This function is specifically for buffered writes.
 */
struct page *grab_cache_page_write_begin(struct address_space *mapping,
					pgoff_t index, unsigned flags)
{
	struct page *page;
	int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;

	if (flags & AOP_FLAG_NOFS)
		fgp_flags |= FGP_NOFS;

	page = pagecache_get_page(mapping, index, fgp_flags,
			mapping_gfp_mask(mapping));
	if (page)
		wait_for_stable_page(page);

	return page;
}
EXPORT_SYMBOL(grab_cache_page_write_begin);

ssize_t generic_perform_write(struct file *file,
				struct iov_iter *i, loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	long status = 0;
	ssize_t written = 0;
	unsigned int flags = 0;

	do {
		struct page *page;
		unsigned long offset;	/* Offset into pagecache page */
		unsigned long bytes;	/* Bytes to write to page */
		size_t copied;		/* Bytes copied from user */
		void *fsdata;

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_count(i));

again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		if (fatal_signal_pending(current)) {
			status = -EINTR;
			break;
		}

		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
						&page, &fsdata);
		if (unlikely(status < 0))
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
						page, fsdata);
		if (unlikely(status < 0))
			break;
		copied = status;

		cond_resched();

		iov_iter_advance(i, copied);
		if (unlikely(copied == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
						iov_iter_single_seg_count(i));
			goto again;
		}
		pos += copied;
		written += copied;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(i));

	return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);

/**
 * __generic_file_write_iter - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and calls proper subroutines depending on whether we
 * do direct IO or a standard buffered write.
 *
 * It expects i_mutex to be grabbed unless we work on a block device or similar
 * object which does not need locking at all.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_mutex.
 */
ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t written = 0;
	ssize_t err;
	ssize_t status;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (iocb->ki_flags & IOCB_DIRECT) {
		loff_t pos, endbyte;

		written = generic_file_direct_write(iocb, from);
		/*
		 * If the write stopped short of completing, fall back to
		 * buffered writes. Some filesystems do this for writes to
		 * holes, for example. For DAX files, a buffered write will
		 * not succeed (even if it did, DAX does not handle dirty
		 * page-cache pages correctly).
		 */
		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
			goto out;

		status = generic_perform_write(file, from, pos = iocb->ki_pos);
		/*
		 * If generic_perform_write() returned a synchronous error
		 * then we want to return the number of bytes which were
		 * direct-written, or the error code if that was zero. Note
		 * that this differs from normal direct-io semantics, which
		 * will return -EFOO even if some bytes were written.
		 */
		if (unlikely(status < 0)) {
			err = status;
			goto out;
		}
		/*
		 * We need to ensure that the page cache pages are written to
		 * disk and invalidated to preserve the expected O_DIRECT
		 * semantics.
		 */
		endbyte = pos + status - 1;
		err = filemap_write_and_wait_range(mapping, pos, endbyte);
		if (err == 0) {
			iocb->ki_pos = endbyte + 1;
			written += status;
			invalidate_mapping_pages(mapping,
						 pos >> PAGE_SHIFT,
						 endbyte >> PAGE_SHIFT);
		} else {
			/*
			 * We don't know how much we wrote, so just return
			 * the number of bytes which were direct-written
			 */
		}
	} else {
		written = generic_perform_write(file, from, iocb->ki_pos);
		if (likely(written > 0))
			iocb->ki_pos += written;
	}
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}
EXPORT_SYMBOL(__generic_file_write_iter);

/**
 * generic_file_write_iter - write data to a file
 * @iocb:	IO state structure
 * @from:	iov_iter with data to write
 *
 * This is a wrapper around __generic_file_write_iter() to be used by most
 * filesystems. It takes care of syncing the file in case of an O_SYNC write
 * and acquires i_mutex as needed.
 */
ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(generic_file_write_iter);
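
/*
 * Illustrative sketch (not part of the original file): most disk-based
 * filesystems reuse the generic read, write and mmap paths wholesale in
 * their file_operations, roughly as below. The "myfs_*" name is
 * hypothetical; generic_file_fsync() is the helper from fs/libfs.c.
 */
static const struct file_operations myfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
	.fsync		= generic_file_fsync,
	.splice_read	= generic_file_splice_read,
};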

/**
 * try_to_release_page() - release old fs-specific metadata on a page
 *
 * @page: the page which the kernel is trying to free
 * @gfp_mask: memory allocation flags (and I/O mode)
 *
 * The address_space is asked to try to release any data held against the
 * page (presumably at page->private). If the release was successful,
 * return '1'. Otherwise return zero.
 *
 * This may also be called if PG_fscache is set on a page, indicating that the
 * page is known to the local caching routines.
 *
 * The @gfp_mask argument specifies whether I/O may be performed to release
 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
 */
int try_to_release_page(struct page *page, gfp_t gfp_mask)
{
	struct address_space * const mapping = page->mapping;

	BUG_ON(!PageLocked(page));
	if (PageWriteback(page))
		return 0;

	if (mapping && mapping->a_ops->releasepage)
		return mapping->a_ops->releasepage(page, gfp_mask);
	return try_to_free_buffers(page);
}

EXPORT_SYMBOL(try_to_release_page);
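
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * attaches a private structure at page->private (taking a page reference
 * when attaching, as the buffer-head code does by convention) might
 * implement the ->releasepage hook invoked above like this; the
 * "struct myfs_page_priv" type and "myfs_*" name are hypothetical.
 */
static int myfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct myfs_page_priv *priv = (void *)page_private(page);

	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		kfree(priv);
		put_page(page);		/* ref taken when priv was attached */
	}
	return 1;			/* nothing pins the page any more */
}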