// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);

	if (hmm && kref_get_unless_zero(&hmm->kref))
		return hmm;

	return NULL;
}

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	bool cleanup = false;

	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	mutex_init(&hmm->lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->dead = false;
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode, ie on
	 * registration of the first mirror through hmm_mirror_register()
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}

static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);
	struct mm_struct *mm = hmm->mm;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	struct hmm *hmm;

	spin_lock(&mm->page_table_lock);
	hmm = mm_get_hmm(mm);
	mm->hmm = NULL;
	if (hmm) {
		hmm->mm = NULL;
		hmm->dead = true;
		spin_unlock(&mm->page_table_lock);
		hmm_put(hmm);
		return;
	}

	spin_unlock(&mm->page_table_lock);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	/* Report this HMM as dying. */
	hmm->dead = true;

	/* Wake-up everyone waiting on any range. */
	mutex_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list)
		range->valid = false;
	wake_up_all(&hmm->wq);
	mutex_unlock(&hmm->lock);

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so the release callback can wait
			 * on any pending work that might itself trigger a
			 * mmu_notifier callback and thus would deadlock with
			 * us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	int ret = 0;

	VM_BUG_ON(!hmm);

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = mmu_notifier_range_blockable(nrange);

	if (mmu_notifier_range_blockable(nrange))
		mutex_lock(&hmm->lock);
	else if (!mutex_trylock(&hmm->lock)) {
		ret = -EAGAIN;
		goto out;
	}
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	mutex_unlock(&hmm->lock);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (!update.blockable && ret == -EAGAIN)
			break;
	}
	up_read(&hmm->mirrors_sem);

out:
	hmm_put(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);

	VM_BUG_ON(!hmm);

	mutex_lock(&hmm->lock);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	mutex_unlock(&hmm->lock);

	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = READ_ONCE(mirror->hmm);

	if (hmm == NULL)
		return;

	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	/* To protect us against double unregister ... */
	mirror->hmm = NULL;
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);
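
/*
 * Illustrative sketch (not part of this file) of how a driver would use the
 * two functions above: it embeds a struct hmm_mirror in a per-process driver
 * structure, registers it while holding mmap_sem for writing (as required by
 * hmm_mirror_register()), and unregisters it on teardown. Names such as
 * my_mirror_ops, my_process and my_process_bind() are hypothetical.
 *
 *   static const struct hmm_mirror_ops my_mirror_ops = {
 *           .sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
 *           .release = my_release,
 *   };
 *
 *   static int my_process_bind(struct my_process *p, struct mm_struct *mm)
 *   {
 *           int ret;
 *
 *           p->mirror.ops = &my_mirror_ops;
 *           down_write(&mm->mmap_sem);
 *           ret = hmm_mirror_register(&p->mirror, mm);
 *           up_write(&mm->mmap_sem);
 *           return ret;
 *   }
 *
 * On teardown the driver simply calls hmm_mirror_unregister(&p->mirror).
 */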

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * So we not only consider the individual per page request, we also
	 * consider the default flags requested for the range. The API can
	 * be used in 2 fashions. The first one where the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * those faults. The second one where the HMM user wants to pre-
	 * fault a range with specific flags. For the latter one it is a
	 * waste to have the user pre-fill the pfn array with a default
	 * flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code ! */
	return -EINVAL;
#endif
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;


again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, ie either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}

static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}


	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mm: the mm struct for the range of virtual address
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table, see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);
	struct hmm *hmm;

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	hmm = hmm_get_or_create(mm);
	if (!hmm)
		return -EFAULT;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead) {
		hmm_put(hmm);
		return -EFAULT;
	}

	/* Initialize range to track CPU page table updates. */
	mutex_lock(&hmm->lock);

	range->hmm = hmm;
	list_add_rcu(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	mutex_unlock(&hmm->lock);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	struct hmm *hmm = range->hmm;

	/* Sanity check this really should not happen. */
	if (hmm == NULL || range->end <= range->start)
		return;

	mutex_lock(&hmm->lock);
	list_del_rcu(&range->list);
	mutex_unlock(&hmm->lock);

	/* Drop reference taken by hmm_range_register() */
	range->valid = false;
	hmm_put(hmm);
	range->hmm = NULL;
}
EXPORT_SYMBOL(hmm_range_unregister);

/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range), number of valid pages
 *          in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by range struct. See include/linux/hmm.h for example
 * on how to use.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EAGAIN;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
				    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If vma does not allow read access, then assume that
			 * it does not allow write access, either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
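
/*
 * Illustrative sketch (not part of this file) of the usage pattern the
 * comment above points to in include/linux/hmm.h: register the range, wait
 * for it to become valid, snapshot while holding mmap_sem for reading, and
 * unregister when done. Only the HMM calls are real; driver-side names such
 * as my_hmm_range_flags, my_hmm_range_values, MY_PFN_SHIFT and timeout_ms
 * are hypothetical.
 *
 *   range.pfns = pfns;
 *   range.flags = my_hmm_range_flags;
 *   range.values = my_hmm_range_values;
 *   range.pfn_shift = MY_PFN_SHIFT;
 *
 *   ret = hmm_range_register(&range, mm, start, end, PAGE_SHIFT);
 *   if (ret)
 *           return ret;
 *   if (!hmm_range_wait_until_valid(&range, timeout_ms)) {
 *           hmm_range_unregister(&range);
 *           return -EBUSY;
 *   }
 *   down_read(&mm->mmap_sem);
 *   ret = hmm_range_snapshot(&range);
 *   up_read(&mm->mmap_sem);
 *   // ... consume range.pfns[] under the driver lock, recheck range.valid ...
 *   hmm_range_unregister(&range);
 */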

/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Return: number of valid pages in range->pfns[] (from range start
 *         address). This may be zero. If the return value is negative,
 *         then one of the following values may be returned:
 *
 *           -EINVAL: invalid arguments or mm or virtual address is in an
 *                    invalid vma (for instance device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (ie either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If vma does not allow read access, then assume that
			 * it does not allow write access, either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
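
/*
 * Illustrative sketch (not part of this file) of the retry loop implied by
 * the return codes above; only the HMM calls are real, the surrounding names
 * (mm, range, timeout_ms, the labels) are hypothetical. Note that on -EAGAIN
 * hmm_range_fault() has already dropped mmap_sem, so the caller must not
 * drop it again.
 *
 *   again:
 *   if (!hmm_range_wait_until_valid(&range, timeout_ms))
 *           goto out;
 *   down_read(&mm->mmap_sem);
 *   ret = hmm_range_fault(&range, true);
 *   if (ret == -EAGAIN)
 *           goto again;          // mmap_sem already dropped
 *   if (ret <= 0) {
 *           up_read(&mm->mmap_sem);
 *           goto out;
 *   }
 *   // still holding mmap_sem: take the driver lock, recheck range.valid,
 *   // then program the device page table from range.pfns[]
 *   up_read(&mm->mmap_sem);
 */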

/**
 * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device against which to dma map pages
 * @daddrs: dma address of mapped pages
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *         mmap_sem)
 * Return: number of pages mapped on success, -EAGAIN if mmap_sem has been
 *         dropped and you need to try again, some other error value otherwise
 *
 * Note same usage pattern as hmm_range_fault().
 */
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, block);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update DMA API to provide invalid DMA address
		 * value instead of a function to test dma address value. This
		 * would remove lot of dumb code duplicated across many archs.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* Check if range is being invalidated */
		if (!range->valid) {
			ret = -EBUSY;
			goto unmap;
		}

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i])) {
			ret = -EFAULT;
			goto unmap;
		}

		mapped++;
	}

	return mapped;

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it is read and write then it was mapped bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
1264
1265/**
1266 * hmm_range_dma_unmap() - unmap range of that was map with hmm_range_dma_map()
1267 * @range: range being unmapped
1268 * @vma: the vma against which the range (optional)
1269 * @device: device against which dma map was done
1270 * @daddrs: dma address of mapped pages
1271 * @dirty: dirty page if it had the write flag set
Ralph Campbell085ea252019-05-06 16:29:39 -07001272 * Return: number of page unmapped on success, -EINVAL otherwise
Jérôme Glisse55c0ece2019-05-13 17:20:28 -07001273 *
1274 * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
1275 * to the sync_cpu_device_pagetables() callback so that it is safe here to
1276 * call set_page_dirty(). Caller must also take appropriate locks to avoid
1277 * concurrent mmu notifier or sync_cpu_device_pagetables() to make progress.
1278 */
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty)
{
	unsigned long i, npages;
	long cpages = 0;

	/* Sanity check. */
	if (range->end <= range->start)
		return -EINVAL;
	if (!daddrs)
		return -EINVAL;
	if (!range->pfns)
		return -EINVAL;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* If it is read and write then it was mapped bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
			dir = DMA_BIDIRECTIONAL;

			/*
			 * See comments in the function description on why it
			 * is safe here to call set_page_dirty().
			 */
			if (dirty)
				set_page_dirty(page);
		}

		/* Unmap and clear pfns/dma address */
		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		range->pfns[i] = range->values[HMM_PFN_NONE];
		/* FIXME see comments in hmm_vma_dma_map() */
		daddrs[i] = 0;
		cpages++;
	}

	return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);
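
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * pair hmm_range_dma_map() with hmm_range_dma_unmap(). The hmm_range_dma_map()
 * prototype is assumed from include/linux/hmm.h, and the range is assumed to
 * have been registered and its pfns[] already filled (e.g. via
 * hmm_range_fault()), which is elided here.
 *
 *	static long example_dma_map_range(struct device *dev,
 *					  struct hmm_range *range,
 *					  dma_addr_t *daddrs)
 *	{
 *		long ret;
 *
 *		ret = hmm_range_dma_map(range, dev, daddrs, true);
 *		if (ret < 0)
 *			return ret;	// -EBUSY: range was invalidated, retry
 *
 *		// ... program the device to DMA to/from daddrs[] ...
 *
 *		return hmm_range_dma_unmap(range, NULL, dev, daddrs, true);
 *	}
 */
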
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);


static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
				   unsigned long addr,
				   const struct page *page,
				   unsigned int flags,
				   pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Return: pointer to new hmm_devmem struct, ERR_PTR otherwise
 *
 * This function first finds an empty range of physical address big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers. An
 * illustrative (hypothetical) usage sketch follows the function below.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about ioport_resource resource ?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
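
/*
 * Illustrative sketch (hypothetical driver, not part of the original file):
 * registering device private memory with hmm_devmem_add(). The callback
 * member names (.fault, .free) match how the ops are invoked above; the exact
 * prototypes live in include/linux/hmm.h and are assumed here.
 *
 *	static vm_fault_t example_devmem_fault(struct hmm_devmem *devmem,
 *					       struct vm_area_struct *vma,
 *					       unsigned long addr,
 *					       const struct page *page,
 *					       unsigned int flags,
 *					       pmd_t *pmdp)
 *	{
 *		// Migrate the page back to system memory, then return
 *		// VM_FAULT_NOPAGE on success or VM_FAULT_SIGBUS on error.
 *		return VM_FAULT_SIGBUS;
 *	}
 *
 *	static void example_devmem_free(struct hmm_devmem *devmem,
 *					struct page *page)
 *	{
 *		// Return the backing device memory to the driver's allocator.
 *	}
 *
 *	static const struct hmm_devmem_ops example_devmem_ops = {
 *		.fault = example_devmem_fault,
 *		.free = example_devmem_free,
 *	};
 *
 *	// During device probe (dev is the driver's struct device):
 *	//	devmem = hmm_devmem_add(&example_devmem_ops, dev, SZ_1G);
 *	//	if (IS_ERR(devmem))
 *	//		return PTR_ERR(devmem);
 */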

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);

/*
 * A device driver that wants to handle memory from multiple devices through a
 * single fake device can use hmm_device to do so. This is purely a helper and
 * it is not needed in order to use any other HMM functionality. (An
 * illustrative usage sketch follows hmm_device_put() below.)
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
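
/*
 * Illustrative sketch (hypothetical driver, not part of the original file):
 * a driver exposing several physical devices behind one fake hmm_device.
 * Only hmm_device_new() and hmm_device_put() above are relied on; the
 * driver-private type is made up for the example.
 *
 *	struct example_driver {
 *		struct hmm_device *hmm_device;
 *		// ... per-driver state shared by all physical devices ...
 *	};
 *
 *	static int example_driver_init(struct example_driver *drv)
 *	{
 *		drv->hmm_device = hmm_device_new(drv);
 *		if (IS_ERR(drv->hmm_device))
 *			return PTR_ERR(drv->hmm_device);
 *		return 0;
 *	}
 *
 *	static void example_driver_fini(struct example_driver *drv)
 *	{
 *		hmm_device_put(drv->hmm_device);
 *	}
 */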

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */