/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/jump_label.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
{
	struct hmm *hmm = READ_ONCE(mm->hmm);

	if (hmm && kref_get_unless_zero(&hmm->kref))
		return hmm;

	return NULL;
}

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	bool cleanup = false;

	if (hmm)
		return hmm;

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	mutex_init(&hmm->lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->dead = false;
	hmm->mm = mm;

	spin_lock(&mm->page_table_lock);
	if (!mm->hmm)
		mm->hmm = hmm;
	else
		cleanup = true;
	spin_unlock(&mm->page_table_lock);

	if (cleanup)
		goto error;

	/*
	 * We should only get here if we hold the mmap_sem in write mode, ie on
	 * registration of the first mirror through hmm_mirror_register().
	 */
	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
		goto error_mm;

	return hmm;

error_mm:
	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);
error:
	kfree(hmm);
	return NULL;
}

static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);
	struct mm_struct *mm = hmm->mm;

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);

	spin_lock(&mm->page_table_lock);
	if (mm->hmm == hmm)
		mm->hmm = NULL;
	spin_unlock(&mm->page_table_lock);

	kfree(hmm);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}

void hmm_mm_destroy(struct mm_struct *mm)
{
	struct hmm *hmm;

	spin_lock(&mm->page_table_lock);
	hmm = mm_get_hmm(mm);
	mm->hmm = NULL;
	if (hmm) {
		hmm->mm = NULL;
		hmm->dead = true;
		spin_unlock(&mm->page_table_lock);
		hmm_put(hmm);
		return;
	}

	spin_unlock(&mm->page_table_lock);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = mm_get_hmm(mm);
	struct hmm_mirror *mirror;
	struct hmm_range *range;

	/* Report this HMM as dying. */
	hmm->dead = true;

	/* Wake up everyone waiting on any range. */
	mutex_lock(&hmm->lock);
	list_for_each_entry(range, &hmm->ranges, list) {
		range->valid = false;
	}
	wake_up_all(&hmm->wq);
	mutex_unlock(&hmm->lock);

	down_write(&hmm->mirrors_sem);
	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
					  list);
	while (mirror) {
		list_del_init(&mirror->list);
		if (mirror->ops->release) {
			/*
			 * Drop mirrors_sem so the callback can wait on any
			 * pending work that might itself trigger a
			 * mmu_notifier callback and thus would deadlock with
			 * us.
			 */
			up_write(&hmm->mirrors_sem);
			mirror->ops->release(mirror);
			down_write(&hmm->mirrors_sem);
		}
		mirror = list_first_entry_or_null(&hmm->mirrors,
						  struct hmm_mirror, list);
	}
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	int ret = 0;

	VM_BUG_ON(!hmm);

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = nrange->blockable;

	if (nrange->blockable)
		mutex_lock(&hmm->lock);
	else if (!mutex_trylock(&hmm->lock)) {
		ret = -EAGAIN;
		goto out;
	}
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	mutex_unlock(&hmm->lock);

	if (nrange->blockable)
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int ret;

		ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (!update.blockable && ret == -EAGAIN) {
			up_read(&hmm->mirrors_sem);
			ret = -EAGAIN;
			goto out;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	hmm_put(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = mm_get_hmm(nrange->mm);

	VM_BUG_ON(!hmm);

	mutex_lock(&hmm->lock);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	mutex_unlock(&hmm->lock);

	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE!
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
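
/*
 * Example (an illustrative sketch only, not kernel API documentation: the
 * dummy_* driver-side names below are hypothetical and the authoritative
 * callback prototypes live in include/linux/hmm.h):
 *
 *	static int dummy_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *					const struct hmm_update *update)
 *	{
 *		// Invalidate the device page table for the virtual address
 *		// range [update->start, update->end). When update->blockable
 *		// is false, return -EAGAIN rather than sleeping.
 *		return 0;
 *	}
 *
 *	static void dummy_release(struct hmm_mirror *mirror)
 *	{
 *		// The address space is going away: stop all device access.
 *	}
 *
 *	static const struct hmm_mirror_ops dummy_mirror_ops = {
 *		.sync_cpu_device_pagetables	= dummy_sync_cpu_device_pagetables,
 *		.release			= dummy_release,
 *	};
 *
 *	// With current->mm->mmap_sem held in write mode:
 *	dummy_mirror.ops = &dummy_mirror_ops;
 *	ret = hmm_mirror_register(&dummy_mirror, current->mm);
 */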

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = READ_ONCE(mirror->hmm);

	if (hmm == NULL)
		return;

	down_write(&hmm->mirrors_sem);
	list_del_init(&mirror->list);
	/* To protect us against double unregister ... */
	mirror->hmm = NULL;
	up_write(&hmm->mirrors_sem);

	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY)
		return -EAGAIN;
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Returns: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If the CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
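
/*
 * Worked example of the logic above (illustrative only): if the driver asked
 * for pfns = flags[HMM_PFN_VALID] | flags[HMM_PFN_WRITE] and the CPU pte is
 * present but read-only (cpu_flags = flags[HMM_PFN_VALID]), the entry is
 * valid but lacks write permission, so *write_fault and *fault are both set
 * and hmm_vma_walk_hole_() will write fault the page. If the driver asked
 * only for flags[HMM_PFN_VALID] on that same pte, nothing is set and the
 * walk just records the current state.
 */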

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		pfns[i] = hmm_pfn_from_pfn(range, pfn) | cpu_flags;
	hmm_vma_walk->last = end;
	return 0;
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry: ignore migration, handle
		 * device private memory and report anything else as an error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_pfn_from_pfn(range, swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	*pfn = hmm_pfn_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;


again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, ie either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that will
	 * not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap the pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

static void hmm_pfns_special(struct hmm_range *range)
{
	unsigned long addr = range->start, i = 0;

	for (; addr < range->end; addr += PAGE_SIZE, i++)
		range->pfns[i] = range->values[HMM_PFN_SPECIAL];
}

/*
 * hmm_range_register() - start tracking changes to CPU page table over a range
 * @range: range
 * @mm: the mm struct for the range of virtual addresses
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h for details.
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end)
{
	range->start = start & PAGE_MASK;
	range->end = end & PAGE_MASK;
	range->valid = false;
	range->hmm = NULL;

	if (range->start >= range->end)
		return -EINVAL;

	range->start = start;
	range->end = end;

	range->hmm = hmm_get_or_create(mm);
	if (!range->hmm)
		return -EFAULT;

	/* Check if hmm_mm_destroy() was called. */
	if (range->hmm->mm == NULL || range->hmm->dead) {
		hmm_put(range->hmm);
		return -EFAULT;
	}

	/* Initialize range to track CPU page table updates. */
	mutex_lock(&range->hmm->lock);

	list_add_rcu(&range->list, &range->hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!range->hmm->notifiers)
		range->valid = true;
	mutex_unlock(&range->hmm->lock);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
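
/*
 * Example of setting up and registering a range (an illustrative sketch
 * only; the pfns buffer and the flags/values tables are driver-provided and
 * the dummy_* names are hypothetical, see include/linux/hmm.h for the real
 * struct hmm_range layout):
 *
 *	struct hmm_range range = {
 *		.pfns	= dummy_pfns,	// one uint64_t per page in the range
 *		.flags	= dummy_flags,	// driver encoding of HMM_PFN_* flags
 *		.values	= dummy_values,	// driver encoding of special values
 *	};
 *
 *	ret = hmm_range_register(&range, mm, start, start + npages * PAGE_SIZE);
 *	if (ret)
 *		return ret;
 *	// ... hmm_range_snapshot() or hmm_range_fault(), see below ...
 *	hmm_range_unregister(&range);
 */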

/*
 * hmm_range_unregister() - stop tracking changes to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	/* Sanity check: this really should not happen. */
	if (range->hmm == NULL || range->end <= range->start)
		return;

	mutex_lock(&range->hmm->lock);
	list_del_rcu(&range->list);
	mutex_unlock(&range->hmm->lock);

	/* Drop the reference taken by hmm_range_register(). */
	range->valid = false;
	hmm_put(range->hmm);
	range->hmm = NULL;
}
EXPORT_SYMBOL(hmm_range_unregister);

/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Returns: number of valid pages in range->pfns[] (from range start address)
 *          on success; otherwise -EINVAL if an argument is invalid, -ENOMEM if
 *          out of memory, -EPERM on invalid permission (for instance asking
 *          for write on a read-only range), -EAGAIN if you need to retry, or
 *          -EFAULT if the range is invalid (ie there is either no valid vma
 *          or it is illegal to access that range).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See include/linux/hmm.h for an
 * example of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If the range is no longer valid, force retry. */
		if (!range->valid)
			return -EAGAIN;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & VM_SPECIAL))
			return -EFAULT;

		/* FIXME support hugetlb fs/dax */
		if (is_vm_hugetlb_page(vma) || vma_is_dax(vma)) {
			hmm_pfns_special(range);
			return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
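
/*
 * Example read-only snapshot loop (an illustrative sketch; it assumes a
 * range set up as in the hmm_range_register() example above, and that
 * hmm_range_wait_until_valid() and HMM_RANGE_DEFAULT_TIMEOUT from
 * include/linux/hmm.h are used to wait out concurrent invalidations):
 *
 *again:
 *	if (!hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT))
 *		goto out;	// address space is going away
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	up_read(&mm->mmap_sem);
 *	if (ret == -EAGAIN)
 *		goto again;	// raced with an invalidation, retry
 *
 *	// Take the driver lock that serializes against
 *	// sync_cpu_device_pagetables(), recheck range.valid, then consume
 *	// range.pfns[].
 */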

/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
 * Returns: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL: Invalid arguments, or the mm or virtual address is in an
 *                    invalid vma (ie either a hugetlbfs or device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (ie either no valid vma or it is illegal to
 *                    access that range).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	/* Check if hmm_mm_destroy() was called. */
	if (hmm->mm == NULL || hmm->dead)
		return -EFAULT;

	do {
		/* If the range is no longer valid, force retry. */
		if (!range->valid) {
			up_read(&hmm->mm->mmap_sem);
			return -EAGAIN;
		}

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & VM_SPECIAL))
			return -EFAULT;

		/* FIXME support hugetlb fs/dax */
		if (is_vm_hugetlb_page(vma) || vma_is_dax(vma)) {
			hmm_pfns_special(range);
			return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM does
			 * not support architectures that allow write without
			 * read.
			 */
			hmm_pfns_clear(range, range->pfns,
				       range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				       hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
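
/*
 * Example fault loop (an illustrative sketch; same assumptions as the
 * snapshot example above, with range.pfns[] pre-filled with the access the
 * driver wants so that missing or read-only pages get faulted in):
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, true);
 *	if (ret < 0) {
 *		// On -EAGAIN hmm_range_fault() has already dropped mmap_sem.
 *		if (ret != -EAGAIN)
 *			up_read(&mm->mmap_sem);
 *		if (ret == -EAGAIN || ret == -EBUSY)
 *			goto again;	// invalidation raced with us, retry
 *		goto out;		// hard error (-EFAULT, -EPERM, ...)
 *	}
 *	// ret pages are now valid in range.pfns[]. Program the device under
 *	// the driver lock that serializes against
 *	// sync_cpu_device_pagetables(), then drop mmap_sem.
 */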
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr)
{
	struct page *page;

	page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
	if (!page)
		return NULL;
	lock_page(page);
	return page;
}
EXPORT_SYMBOL(hmm_vma_alloc_locked_page);


static void hmm_devmem_ref_release(struct percpu_ref *ref)
{
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	complete(&devmem->completion);
}

static void hmm_devmem_ref_exit(void *data)
{
	struct percpu_ref *ref = data;
	struct hmm_devmem *devmem;

	devmem = container_of(ref, struct hmm_devmem, ref);
	wait_for_completion(&devmem->completion);
	percpu_ref_exit(ref);
}

static void hmm_devmem_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill(ref);
}

static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
				   unsigned long addr,
				   const struct page *page,
				   unsigned int flags,
				   pmd_t *pmdp)
{
	struct hmm_devmem *devmem = page->pgmap->data;

	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
}

static void hmm_devmem_free(struct page *page, void *data)
{
	struct hmm_devmem *devmem = data;

	page->mapping = NULL;

	devmem->ops->free(devmem, page);
}

/*
 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
 *
 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
 * @device: device struct to bind the resource to
 * @size: size in bytes of the device memory to add
 * Returns: pointer to a new hmm_devmem struct, or an ERR_PTR otherwise
 *
 * This function first finds an empty range of physical addresses big enough to
 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
 * in turn allocates struct pages. It does not do anything beyond that; all
 * events affecting the memory will go through the various callbacks provided
 * by the hmm_devmem_ops struct.
 *
 * The device driver should call this function during device initialization and
 * is then responsible for memory management. HMM only provides helpers.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size)
{
	struct hmm_devmem *devmem;
	resource_size_t addr;
	void *result;
	int ret;

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = NULL;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	size = ALIGN(size, PA_SECTION_SIZE);
	addr = min((unsigned long)iomem_resource.end,
		   (1UL << MAX_PHYSMEM_BITS) - 1);
	addr = addr - size + 1UL;

	/*
	 * FIXME add a new helper to quickly walk resource tree and find free
	 * range
	 *
	 * FIXME what about the ioport_resource resource?
	 */
	for (; addr > size && addr >= iomem_resource.start; addr -= size) {
		ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
		if (ret != REGION_DISJOINT)
			continue;

		devmem->resource = devm_request_mem_region(device, addr, size,
							   dev_name(device));
		if (!devmem->resource)
			return ERR_PTR(-ENOMEM);
		break;
	}
	if (!devmem->resource)
		return ERR_PTR(-ERANGE);

	devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add);
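
/*
 * Example (an illustrative sketch; the dummy_* callbacks are hypothetical
 * driver code and the authoritative struct hmm_devmem_ops prototypes live in
 * include/linux/hmm.h):
 *
 *	static vm_fault_t dummy_devmem_fault(struct hmm_devmem *devmem,
 *					struct vm_area_struct *vma,
 *					unsigned long addr,
 *					const struct page *page,
 *					unsigned int flags, pmd_t *pmdp)
 *	{
 *		// Migrate the device page backing @addr to system memory.
 *		return VM_FAULT_SIGBUS;
 *	}
 *
 *	static void dummy_devmem_free(struct hmm_devmem *devmem,
 *				      struct page *page)
 *	{
 *		// Return the backing device memory to the driver allocator.
 *	}
 *
 *	static const struct hmm_devmem_ops dummy_devmem_ops = {
 *		.fault	= dummy_devmem_fault,
 *		.free	= dummy_devmem_free,
 *	};
 *
 *	devmem = hmm_devmem_add(&dummy_devmem_ops, &pdev->dev, SZ_64M);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	// Device pages then span pfn_to_page(devmem->pfn_first) up to but
 *	// not including pfn_to_page(devmem->pfn_last).
 */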

struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res)
{
	struct hmm_devmem *devmem;
	void *result;
	int ret;

	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	dev_pagemap_get_ops();

	devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
	if (!devmem)
		return ERR_PTR(-ENOMEM);

	init_completion(&devmem->completion);
	devmem->pfn_first = -1UL;
	devmem->pfn_last = -1UL;
	devmem->resource = res;
	devmem->device = device;
	devmem->ops = ops;

	ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
			      0, GFP_KERNEL);
	if (ret)
		return ERR_PTR(ret);

	ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
				       &devmem->ref);
	if (ret)
		return ERR_PTR(ret);

	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
	devmem->pfn_last = devmem->pfn_first +
			   (resource_size(devmem->resource) >> PAGE_SHIFT);
	devmem->page_fault = hmm_devmem_fault;

	devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
	devmem->pagemap.res = *devmem->resource;
	devmem->pagemap.page_free = hmm_devmem_free;
	devmem->pagemap.altmap_valid = false;
	devmem->pagemap.ref = &devmem->ref;
	devmem->pagemap.data = devmem;
	devmem->pagemap.kill = hmm_devmem_ref_kill;

	result = devm_memremap_pages(devmem->device, &devmem->pagemap);
	if (IS_ERR(result))
		return result;
	return devmem;
}
EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper and
 * it is not needed in order to make use of any HMM functionality.
 */
#define HMM_DEVICE_MAX 256

static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
static DEFINE_SPINLOCK(hmm_device_lock);
static struct class *hmm_device_class;
static dev_t hmm_device_devt;

static void hmm_device_release(struct device *device)
{
	struct hmm_device *hmm_device;

	hmm_device = container_of(device, struct hmm_device, device);
	spin_lock(&hmm_device_lock);
	clear_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	kfree(hmm_device);
}

struct hmm_device *hmm_device_new(void *drvdata)
{
	struct hmm_device *hmm_device;

	hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
	if (!hmm_device)
		return ERR_PTR(-ENOMEM);

	spin_lock(&hmm_device_lock);
	hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
	if (hmm_device->minor >= HMM_DEVICE_MAX) {
		spin_unlock(&hmm_device_lock);
		kfree(hmm_device);
		return ERR_PTR(-EBUSY);
	}
	set_bit(hmm_device->minor, hmm_device_mask);
	spin_unlock(&hmm_device_lock);

	dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
	hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
					hmm_device->minor);
	hmm_device->device.release = hmm_device_release;
	dev_set_drvdata(&hmm_device->device, drvdata);
	hmm_device->device.class = hmm_device_class;
	device_initialize(&hmm_device->device);

	return hmm_device;
}
EXPORT_SYMBOL(hmm_device_new);

void hmm_device_put(struct hmm_device *hmm_device)
{
	put_device(&hmm_device->device);
}
EXPORT_SYMBOL(hmm_device_put);
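
/*
 * Example (an illustrative sketch; my_drvdata is a hypothetical driver
 * pointer and error handling is elided):
 *
 *	struct hmm_device *hdev = hmm_device_new(my_drvdata);
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	// Use &hdev->device as the fake struct device, for instance as the
 *	// @device argument of hmm_devmem_add(), then drop the reference:
 *	hmm_device_put(hdev);
 */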

static int __init hmm_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&hmm_device_devt, 0,
				  HMM_DEVICE_MAX,
				  "hmm_device");
	if (ret)
		return ret;

	hmm_device_class = class_create(THIS_MODULE, "hmm_device");
	if (IS_ERR(hmm_device_class)) {
		unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
		return PTR_ERR(hmm_device_class);
	}
	return 0;
}

device_initcall(hmm_init);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */