/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
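
/*
 * Illustration (assuming a configuration with 4k pages and 2MiB PMDs, e.g.
 * x86-64): PMD_SIZE >> PAGE_SHIFT is 512, so PG_PMD_COLOUR is 0x1ff and
 * "index & ~PG_PMD_COLOUR" rounds a page offset down to the first page of
 * its PMD.
 */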

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

/*
 * We use the lowest available bit in an exceptional entry for locking, one
 * bit for the entry size (PMD), and two more to tell us whether the entry is
 * a zero page or an empty entry that is just used for locking.  In total
 * four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
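
/*
 * Worked example of the encoding above (illustrative values only): a locked
 * PTE-sized entry for sector 8 is
 *
 *	(8UL << RADIX_DAX_SHIFT) | RADIX_TREE_EXCEPTIONAL_ENTRY |
 *		RADIX_DAX_ENTRY_LOCK
 *
 * which is what dax_radix_locked_entry(8, 0) below builds.  dax_radix_sector()
 * recovers the sector by shifting the flag bits back out, and
 * dax_radix_order() returns 0 because RADIX_DAX_PMD is clear.
 */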

static unsigned long dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
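
/*
 * Example: with PG_PMD_COLOUR == 0x1ff, a PMD entry covering file offsets
 * 0x200-0x3ff always hashes with entry_start == 0x200, so every page offset
 * within that PMD waits on (and is woken from) the same wait queue.
 */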

static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
		int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it.  The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and later wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}
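
/*
 * Typical calling pattern (sketch): the caller takes mapping->tree_lock,
 * calls get_unlocked_mapping_entry(), and then either locks the returned
 * entry with lock_slot() (releasing it later via put_locked_mapping_entry())
 * or drops it again with put_unlocked_mapping_entry(), as grab_mapping_entry()
 * and dax_writeback_one() below do.
 */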

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with a radix tree entry that we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Find the radix tree entry at the given index.  If it points to an
 * exceptional entry, return it with the radix tree entry locked.  If the
 * radix tree doesn't contain an entry at the given index, create an empty
 * exceptional entry for the index and return it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries.  There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags.  For
 * persistent memory the benefit is doubtful.  We can add that later if we
 * can show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}
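
/*
 * Usage sketch: the PTE fault path below calls
 * grab_mapping_entry(mapping, vmf->pgoff, 0), while a PMD-sized entry is
 * requested by passing RADIX_DAX_PMD as size_flag instead.
 */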

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags, bool dirty)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_range(mapping,
					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
					PMD_SIZE, 0);
		else /* pte entry */
			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
					PAGE_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
		entry = new_entry;
	}

	if (dirty)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);

	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct block_device *bdev,
		struct dax_device *dax_dev, struct address_space *mapping,
		pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot, *kaddr;
	long ret = 0, id;
	sector_t sector;
	pgoff_t pgoff;
	size_t size;
	pfn_t pfn;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	sector = dax_radix_sector(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	id = dax_read_lock();
	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (ret)
		goto dax_unlock;

	/*
	 * dax_direct_access() may sleep, so cannot hold tree_lock over
	 * its invocation.
	 */
	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
	if (ret < 0)
		goto dax_unlock;

	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
		ret = -EIO;
		goto dax_unlock;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
	dax_flush(dax_dev, kaddr, size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
 dax_unlock:
	dax_read_unlock(id);
	put_locked_mapping_entry(mapping, index);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, dax_dev, mapping,
					indices[i], pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}
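
/*
 * Note: the ">> 9" above converts the byte distance from the start of the
 * extent into 512-byte sectors, matching the units of iomap->blkno; e.g. a
 * pos 8KiB past iomap->offset advances the returned sector by 16.
 */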

static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	void *kaddr;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   &kaddr, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	int ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	void *entry2;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
			RADIX_DAX_ZERO_PAGE, false);
	if (IS_ERR(entry2)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}
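
/*
 * __dax_zero_page_range() below uses this check to pick between two paths:
 * ranges aligned to the device's logical block size are zeroed via
 * blkdev_issue_zeroout(), while sub-block ranges are zeroed through
 * dax_direct_access() plus memset() and then flushed.
 */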

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}
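
/*
 * Example: an mmap(..., MAP_SHARED_VALIDATE | MAP_SYNC) mapping sets VM_SYNC
 * on the VMA, so a write fault over an extent the filesystem reports as
 * IOMAP_F_DIRTY takes the synchronous path in dax_iomap_pte_fault() below and
 * returns VM_FAULT_NEEDDSYNC instead of installing the PTE right away.
 */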

static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	int vmf_ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, vmf_ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		vmf_ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		vmf_ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the filesystem block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
1146 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
Ross Zwislera9c42b32017-05-08 16:00:00 -07001147 if (error) {
1148 vmf_ret = dax_fault_return(error);
Jan Kara13e451f2017-05-12 15:46:57 -07001149 goto unlock_entry;
Ross Zwislera9c42b32017-05-08 16:00:00 -07001150 }
Christoph Hellwiga7d73fe2016-09-19 11:24:50 +10001151 if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
Jan Kara13e451f2017-05-12 15:46:57 -07001152 error = -EIO; /* fs corruption? */
1153 goto error_finish_iomap;
Christoph Hellwiga7d73fe2016-09-19 11:24:50 +10001154 }
1155
Christoph Hellwiga7d73fe2016-09-19 11:24:50 +10001156 if (vmf->cow_page) {
Jan Kara31a6f1a2017-11-01 16:36:32 +01001157 sector_t sector = dax_iomap_sector(&iomap, pos);
1158
Christoph Hellwiga7d73fe2016-09-19 11:24:50 +10001159 switch (iomap.type) {
1160 case IOMAP_HOLE:
1161 case IOMAP_UNWRITTEN:
1162 clear_user_highpage(vmf->cow_page, vaddr);
1163 break;
1164 case IOMAP_MAPPED:
Dan Williamscccbce62017-01-27 13:31:42 -08001165 error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1166 sector, PAGE_SIZE, vmf->cow_page, vaddr);
Christoph Hellwiga7d73fe2016-09-19 11:24:50 +10001167 break;
1168 default:
1169 WARN_ON_ONCE(1);
1170 error = -EIO;
1171 break;
1172 }
1173
1174 if (error)
Jan Kara13e451f2017-05-12 15:46:57 -07001175 goto error_finish_iomap;
Jan Karab1aa8122016-12-14 15:07:24 -08001176
1177 __SetPageUptodate(vmf->cow_page);
1178 vmf_ret = finish_fault(vmf);
1179 if (!vmf_ret)
1180 vmf_ret = VM_FAULT_DONE_COW;
Jan Kara13e451f2017-05-12 15:46:57 -07001181 goto finish_iomap;
Christoph Hellwiga7d73fe2016-09-19 11:24:50 +10001182 }
1183
Dan Williamsaaa422c2017-11-13 16:38:44 -08001184 sync = dax_fault_is_synchronous(flags, vma, &iomap);
Jan Karacaa51d22017-11-01 16:36:42 +01001185
Christoph Hellwiga7d73fe2016-09-19 11:24:50 +10001186 switch (iomap.type) {
1187 case IOMAP_MAPPED:
1188 if (iomap.flags & IOMAP_F_NEW) {
1189 count_vm_event(PGMAJFAULT);
Jan Karaa0987ad2017-11-01 16:36:34 +01001190 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
Christoph Hellwiga7d73fe2016-09-19 11:24:50 +10001191 major = VM_FAULT_MAJOR;
1192 }
Jan Kara1b5a1cb2017-11-01 16:36:36 +01001193 error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1194 if (error < 0)
1195 goto error_finish_iomap;
1196
1197 entry = dax_insert_mapping_entry(mapping, vmf, entry,
1198 dax_iomap_sector(&iomap, pos),
Jan Karacaa51d22017-11-01 16:36:42 +01001199 0, write && !sync);
Jan Kara1b5a1cb2017-11-01 16:36:36 +01001200 if (IS_ERR(entry)) {
1201 error = PTR_ERR(entry);
1202 goto error_finish_iomap;
1203 }
1204
Jan Karacaa51d22017-11-01 16:36:42 +01001205 /*
1206 * If we are doing synchronous page fault and inode needs fsync,
1207 * we can insert PTE into page tables only after that happens.
1208 * Skip insertion for now and return the pfn so that caller can
1209 * insert it after fsync is done.
1210 */
1211 if (sync) {
1212 if (WARN_ON_ONCE(!pfnp)) {
1213 error = -EIO;
1214 goto error_finish_iomap;
1215 }
1216 *pfnp = pfn;
1217 vmf_ret = VM_FAULT_NEEDDSYNC | major;
1218 goto finish_iomap;
1219 }
Jan Kara1b5a1cb2017-11-01 16:36:36 +01001220 trace_dax_insert_mapping(inode, vmf, entry);
1221 if (write)
1222 error = vm_insert_mixed_mkwrite(vma, vaddr, pfn);
1223 else
1224 error = vm_insert_mixed(vma, vaddr, pfn);
1225
Jan Kara9f141d62016-10-19 14:34:31 +02001226 /* -EBUSY is fine, somebody else faulted on the same PTE */
1227 if (error == -EBUSY)
1228 error = 0;
Christoph Hellwiga7d73fe2016-09-19 11:24:50 +10001229 break;
1230 case IOMAP_UNWRITTEN:
1231 case IOMAP_HOLE:
Jan Karad2c43ef2017-11-01 16:36:35 +01001232 if (!write) {
Ross Zwisler91d25ba2017-09-06 16:18:43 -07001233 vmf_ret = dax_load_hole(mapping, entry, vmf);
Jan Kara13e451f2017-05-12 15:46:57 -07001234 goto finish_iomap;
Ross Zwisler15502902016-11-08 11:33:26 +11001235 }
Christoph Hellwiga7d73fe2016-09-19 11:24:50 +10001236 /*FALLTHRU*/
1237 default:
1238 WARN_ON_ONCE(1);
1239 error = -EIO;
1240 break;
1241 }
1242
Jan Kara13e451f2017-05-12 15:46:57 -07001243 error_finish_iomap:
Jan Kara9f141d62016-10-19 14:34:31 +02001244 vmf_ret = dax_fault_return(error) | major;
Jan Kara9f141d62016-10-19 14:34:31 +02001245 finish_iomap:
1246 if (ops->iomap_end) {
1247 int copied = PAGE_SIZE;
1248
1249 if (vmf_ret & VM_FAULT_ERROR)
1250 copied = 0;
1251 /*
1252 * The fault is done by now and there's no way back (other
1253 * thread may be already happily using PTE we have installed).
1254 * Just ignore error from ->iomap_end since we cannot do much
1255 * with it.
1256 */
1257 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
Ross Zwisler15502902016-11-08 11:33:26 +11001258 }
Jan Kara13e451f2017-05-12 15:46:57 -07001259 unlock_entry:
Ross Zwisler91d25ba2017-09-06 16:18:43 -07001260 put_locked_mapping_entry(mapping, vmf->pgoff);
Jan Kara13e451f2017-05-12 15:46:57 -07001261 out:
Ross Zwislera9c42b32017-05-08 16:00:00 -07001262 trace_dax_pte_fault_done(inode, vmf, vmf_ret);
Jan Kara9f141d62016-10-19 14:34:31 +02001263 return vmf_ret;
Christoph Hellwiga7d73fe2016-09-19 11:24:50 +10001264}
Ross Zwisler642261a2016-11-08 11:34:45 +11001265
1266#ifdef CONFIG_FS_DAX_PMD
Jan Kara302a5e32017-11-01 16:36:37 +01001267/*
1268 * The 'colour' (ie low bits) within a PMD of a page offset. This comes up
1269 * more often than one might expect in the below functions.
1270 */
1271#define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)

static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
	if (IS_ERR(ret))
		goto fallback;

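	/*
	 * Re-check under the PMD lock that nobody has installed a PMD for
	 * this range in the meantime; if they have, fall back to PTEs rather
	 * than overwrite it.
	 */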
	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether the offset isn't beyond the end of the file now.
	 * The caller is supposed to hold locks serializing us with truncate
	 * / punch hole, so this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
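	/* max_pgoff is the index of the first page beyond EOF */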

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (colour) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
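	/*
	 * For example, with 4K pages and 2MiB PMDs (as on x86-64),
	 * PG_PMD_COLOUR is 0x1ff: the low nine bits of the file's page
	 * offset must equal the low nine bits of the virtual page number,
	 * otherwise no aligned 2MiB mapping is possible and we fall back
	 * to PTEs.
	 */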
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will
	 * be retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O,
	 * only setting up a mapping, so really we're using iomap_begin() as
	 * a way to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

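	/*
	 * Only proceed if the filesystem gave us a mapping that covers the
	 * whole 2MiB range; otherwise fall back to PTEs.
	 */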
	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

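	/*
	 * The fault is synchronous when it is a write fault on a MAP_SYNC
	 * (VM_SYNC) mapping and the newly allocated blocks still need an
	 * fsync (IOMAP_F_DIRTY) before a writeable mapping may be installed.
	 */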
	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry,
						dax_iomap_sector(&iomap, pos),
						RADIX_DAX_PMD, write && !sync);
		if (IS_ERR(entry))
			goto finish_iomap;

		/*
		 * If we are doing a synchronous page fault and the inode
		 * needs fsync, we can insert the PMD into the page tables
		 * only after that happens.  Skip the insertion for now and
		 * return the pfn so that the caller can insert it after
		 * fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (another
		 * thread may already be using the PMD we have installed).
		 * Just ignore the error from ->iomap_end since we cannot do
		 * much with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
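		/*
		 * Split any huge PMD covering this range so that the fault
		 * can be retried with PTEs.
		 */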
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for the page fault to proceed
 * successfully.
 */
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
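
/*
 * Illustrative sketch (not part of this file): roughly how a filesystem
 * fault handler is expected to wire the helpers above together, loosely
 * modelled on how ext4 and xfs call them.  The foo_* names, the lock and
 * the iomap_ops are placeholders; a real caller must use its own locking
 * and its own struct iomap_ops.
 *
 *	static int foo_dax_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		bool write = vmf->flags & FAULT_FLAG_WRITE;
 *		pfn_t pfn;
 *		int result;
 *
 *		if (write) {
 *			sb_start_pagefault(inode->i_sb);
 *			file_update_time(vmf->vma->vm_file);
 *		}
 *		down_read(&foo_dax_sem);	// serialise against truncate
 *		result = dax_iomap_fault(vmf, pe_size, &pfn, &foo_iomap_ops);
 *		up_read(&foo_dax_sem);
 *
 *		// For a write fault on a MAP_SYNC mapping, persist the
 *		// metadata first and only then install the PTE/PMD.
 *		if (result & VM_FAULT_NEEDDSYNC)
 *			result = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		if (write)
 *			sb_end_pagefault(inode->i_sb);
 *		return result;
 *	}
 */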

/**
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmapped DAX file.  It also takes care of marking the corresponding
 * radix tree entry as dirty.
 */
static int dax_insert_pfn_mkwrite(struct vm_fault *vmf,
				  enum page_entry_size pe_size,
				  pfn_t pfn)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;
	int vmf_ret, error;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
		put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
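	/*
	 * Mark the radix tree entry dirty before making the mapping
	 * writeable so that a later dax_writeback_mapping_range() knows it
	 * has to flush this range.
	 */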
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	switch (pe_size) {
	case PE_SIZE_PTE:
		error = vm_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
		vmf_ret = dax_fault_return(error);
		break;
#ifdef CONFIG_FS_DAX_PMD
	case PE_SIZE_PMD:
		vmf_ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
					     pfn, true);
		break;
#endif
	default:
		vmf_ret = VM_FAULT_FALLBACK;
	}
	put_locked_mapping_entry(mapping, index);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, vmf_ret);
	return vmf_ret;
}

/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media, and then inserts the appropriate page
 * table entry.
 */
int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
			  pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	size_t len = 0;

	if (pe_size == PE_SIZE_PTE)
		len = PAGE_SIZE;
	else if (pe_size == PE_SIZE_PMD)
		len = PMD_SIZE;
	else
		WARN_ON_ONCE(1);
	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);