/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/iomap.h>
#include "internal.h"

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

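/*
 * Helpers for decoding the type bits (PMD vs PTE, huge zero page, empty)
 * stored alongside the sector in a DAX radix tree entry.
 */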
static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

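/*
 * Read one page worth of data surrounding sector 'n' from the pmem backing
 * 'bdev' into a freshly allocated page.  Returns the page or an ERR_PTR().
 */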
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree and, if it is a locked exceptional
 * entry, wait for it to become unlocked before returning it.  The caller
 * must call put_unlocked_mapping_entry() if it decides not to lock the
 * entry, or put_locked_mapping_entry() once it has locked the entry and
 * later wants to unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}
242
Jan Karab1aa8122016-12-14 15:07:24 -0800243static void dax_unlock_mapping_entry(struct address_space *mapping,
244 pgoff_t index)
245{
246 void *entry, **slot;
247
248 spin_lock_irq(&mapping->tree_lock);
249 entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
250 if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
251 !slot_locked(mapping, slot))) {
252 spin_unlock_irq(&mapping->tree_lock);
253 return;
254 }
255 unlock_slot(mapping, slot);
256 spin_unlock_irq(&mapping->tree_lock);
257 dax_wake_mapping_entry_waiter(mapping, index, entry, false);
258}
259
Jan Karaac401cc2016-05-12 18:29:18 +0200260static void put_locked_mapping_entry(struct address_space *mapping,
261 pgoff_t index, void *entry)
262{
263 if (!radix_tree_exceptional_entry(entry)) {
264 unlock_page(entry);
265 put_page(entry);
266 } else {
Jan Karabc2466e2016-05-12 18:29:19 +0200267 dax_unlock_mapping_entry(mapping, index);
Jan Karaac401cc2016-05-12 18:29:18 +0200268 }
269}
270
271/*
272 * Called when we are done with radix tree entry we looked up via
273 * get_unlocked_mapping_entry() and which we didn't lock in the end.
274 */
275static void put_unlocked_mapping_entry(struct address_space *mapping,
276 pgoff_t index, void *entry)
277{
278 if (!radix_tree_exceptional_entry(entry))
279 return;
280
281 /* We have to wake up next waiter for the radix tree entry lock */
Ross Zwisler422476c2016-11-08 11:33:44 +1100282 dax_wake_mapping_entry_waiter(mapping, index, entry, false);
283}
284
/*
 * Find the radix tree entry at the given index.  If it points to a page,
 * return with the page locked.  If it points to an exceptional entry, return
 * with the radix tree entry locked.  If the radix tree doesn't contain the
 * given index, create an empty exceptional entry for the index and return
 * with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
					dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
			    dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Someone already created the entry?  This is a
			 * normal failure when inserting PMDs in a range
			 * that already contains PTEs.  In that case we want
			 * to return -EEXIST immediately.
			 */
			if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
				goto restart;
			/*
			 * Our insertion of a DAX PMD entry failed, most
			 * likely because it collided with a PTE sized entry
			 * at a different index in the PMD range.  We haven't
			 * inserted anything into the radix tree and have no
			 * waiters to wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	/*
	 * This gets called from truncate / punch_hole path. As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing).
	 * Since the caller has seen an exceptional entry for this index, we
	 * better find it at that index as well...
	 */
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	radix_tree_delete(&mapping->page_tree, index);
	mapping->nrexceptional--;
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, true);

	return 1;
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct page *page;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(entry)) {
		vmf->page = entry;
		return VM_FAULT_LOCKED;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
		struct page *to, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need hole page anymore, it has been replaced with
		 * locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}

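/*
 * Flush a single DAX radix tree entry to the persistent domain.  The entry
 * is revalidated under mapping->tree_lock and skipped if another fsync
 * thread already wrote it back; the TOWRITE tag is cleared on success.
 */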
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto unlock;
	}

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	dax.sector = dax_radix_sector(entry);
	dax.size = PAGE_SIZE << dax_radix_order(entry);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end].  This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

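/*
 * Install a PTE for a single mapped block: resolve the pfn with
 * dax_map_atomic(), record the sector in the radix tree via
 * dax_insert_mapping_entry(), then insert the pfn into the page tables.
 */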
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, sector_t sector, size_t size,
		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	put_unlocked_mapping_entry(mapping, index, entry);
out:
	spin_unlock_irq(&mapping->tree_lock);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

#ifdef CONFIG_FS_IOMAP
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		struct blk_dax_ctl dax = { 0 };
		ssize_t map_len;

		dax.sector = dax_iomap_sector(iomap, pos);
		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
		map_len = dax_map_atomic(iomap->bdev, &dax);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		dax.addr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
		else
			map_len = copy_to_iter(dax.addr, map_len, iter);
		dax_unmap_atomic(iomap->bdev, &dax);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @iter: The addresses to do I/O from or to
 * @ops: iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE)
		flags |= IOMAP_WRITE;

	/*
	 * Yes, even DAX files can have page cache attached to them:  A zeroed
	 * page is inserted into the pagecache when we have to serve a write
	 * fault on a hole.  It should never be dirtied and can simply be
	 * dropped from the pagecache once we get real data for the page.
	 *
	 * XXX: This is racy against mmap, and there's nothing we can do about
	 * it. We'll eventually need to shift this down even further so that
	 * we can check if we allocated blocks over a hole first.
	 */
	if (mapping->nrpages) {
		ret = invalidate_inode_pages2_range(mapping,
				pos >> PAGE_SHIFT,
				(pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in their fault
 * or mkwrite handler for DAX files. Assumes the caller has done all the
 * necessary locking for the page fault to proceed successfully.
 */
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			struct iomap_ops *ops)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	if (pos >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		error = PTR_ERR(entry);
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the filesystem block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		goto unlock_entry;
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
					vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, sector,
				PAGE_SIZE, &entry, vma, vmf);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, entry, vmf);
			break;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		if (error || (vmf_ret & VM_FAULT_ERROR)) {
			/* keep previous error */
			ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
					&iomap);
		} else {
			error = ops->iomap_end(inode, pos, PAGE_SIZE,
					PAGE_SIZE, flags, &iomap);
		}
	}
 unlock_entry:
	if (vmf_ret != VM_FAULT_LOCKED || error)
		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if (error < 0 && error != -EBUSY)
		return VM_FAULT_SIGBUS | major;
	if (vmf_ret) {
		WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
		return vmf_ret;
	}
	return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

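/*
 * Install a PMD-sized mapping for a fault backed by real storage.  Falls
 * back to PTEs if the mapped extent is shorter than PMD_SIZE, if the pfn is
 * not aligned to the PMD colour, or if it is not a devmap pfn.
 */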
static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
		struct vm_fault *vmf, unsigned long address,
		struct iomap *iomap, loff_t pos, bool write, void **entryp)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct block_device *bdev = iomap->bdev;
	struct blk_dax_ctl dax = {
		.sector = dax_iomap_sector(iomap, pos),
		.size = PMD_SIZE,
	};
	long length = dax_map_atomic(bdev, &dax);
	void *ret;

	if (length < 0) /* dax_map_atomic() failed */
		return VM_FAULT_FALLBACK;
	if (length < PMD_SIZE)
		goto unmap_fallback;
	if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
		goto unmap_fallback;
	if (!pfn_t_devmap(dax.pfn))
		goto unmap_fallback;

	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		return VM_FAULT_FALLBACK;
	*entryp = ret;

	return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);

 unmap_fallback:
	dax_unmap_atomic(bdev, &dax);
	return VM_FAULT_FALLBACK;
}

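/*
 * Handle a PMD-sized read fault over a hole by mapping the huge zero page
 * and recording a RADIX_DAX_HZP entry in the radix tree.
 */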
static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
		struct vm_fault *vmf, unsigned long address,
		struct iomap *iomap, void **entryp)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = address & PMD_MASK;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	void *ret;

	zero_page = mm_get_huge_zero_page(vma->vm_mm);

	if (unlikely(!zero_page))
		return VM_FAULT_FALLBACK;

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
			RADIX_DAX_PMD | RADIX_DAX_HZP);
	if (IS_ERR(ret))
		return VM_FAULT_FALLBACK;
	*entryp = ret;

	ptl = pmd_lock(vma->vm_mm, pmd);
	if (!pmd_none(*pmd)) {
		spin_unlock(ptl);
		return VM_FAULT_FALLBACK;
	}

	pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
	spin_unlock(ptl);
	return VM_FAULT_NOPAGE;
}

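/*
 * Handle a huge page fault on a DAX file.  Callers hold the same locks as
 * for dax_iomap_fault(); VM_FAULT_FALLBACK is returned whenever a PMD
 * mapping cannot be established so that the fault is retried with PTEs.
 */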
int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	struct vm_fault vmf;
	void *entry;
	loff_t pos;
	int error;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	if (pgoff > max_pgoff)
		return VM_FAULT_SIGBUS;

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
	 * the tree, for instance), it will return -EEXIST and we just fall
	 * back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;
	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
				&iomap, pos, write, &entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			goto finish_iomap;
		result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
				&entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		if (result == VM_FAULT_FALLBACK) {
			ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
					&iomap);
		} else {
			error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
					iomap_flags, &iomap);
			if (error)
				result = VM_FAULT_FALLBACK;
		}
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff, entry);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, pmd, address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
	return result;
}
EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
#endif /* CONFIG_FS_DAX_PMD */
#endif /* CONFIG_FS_IOMAP */