Thomas Gleixnerc942fdd2019-05-27 08:55:06 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Jérôme Glisse133ff0e2017-09-08 16:11:23 -07002/*
3 * Copyright 2013 Red Hat Inc.
4 *
Jérôme Glissef813f212018-10-30 15:04:06 -07005 * Authors: Jérôme Glisse <jglisse@redhat.com>
Jérôme Glisse133ff0e2017-09-08 16:11:23 -07006 */
7/*
8 * Refer to include/linux/hmm.h for information about heterogeneous memory
9 * management or HMM for short.
10 */
11#include <linux/mm.h>
12#include <linux/hmm.h>
Jérôme Glisse858b54d2017-09-08 16:12:02 -070013#include <linux/init.h>
Jérôme Glisseda4c3c72017-09-08 16:11:31 -070014#include <linux/rmap.h>
15#include <linux/swap.h>
Jérôme Glisse133ff0e2017-09-08 16:11:23 -070016#include <linux/slab.h>
17#include <linux/sched.h>
Jérôme Glisse4ef589d2017-09-08 16:11:58 -070018#include <linux/mmzone.h>
19#include <linux/pagemap.h>
Jérôme Glisseda4c3c72017-09-08 16:11:31 -070020#include <linux/swapops.h>
21#include <linux/hugetlb.h>
Jérôme Glisse4ef589d2017-09-08 16:11:58 -070022#include <linux/memremap.h>
Jérôme Glisse7b2d55d22017-09-08 16:11:46 -070023#include <linux/jump_label.h>
Jérôme Glisse55c0ece2019-05-13 17:20:28 -070024#include <linux/dma-mapping.h>
Jérôme Glissec0b12402017-09-08 16:11:27 -070025#include <linux/mmu_notifier.h>
Jérôme Glisse4ef589d2017-09-08 16:11:58 -070026#include <linux/memory_hotplug.h>
27
28#define PA_SECTION_SIZE (1UL << PA_SECTION_SHIFT)
Jérôme Glisse133ff0e2017-09-08 16:11:23 -070029
Jérôme Glisse6b368cd2017-09-08 16:12:32 -070030#if IS_ENABLED(CONFIG_HMM_MIRROR)
Jérôme Glissec0b12402017-09-08 16:11:27 -070031static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
32
Jérôme Glisse704f3f22019-05-13 17:19:48 -070033static inline struct hmm *mm_get_hmm(struct mm_struct *mm)
Jérôme Glisse133ff0e2017-09-08 16:11:23 -070034{
Jérôme Glissec0b12402017-09-08 16:11:27 -070035 struct hmm *hmm = READ_ONCE(mm->hmm);
Jérôme Glisse704f3f22019-05-13 17:19:48 -070036
37 if (hmm && kref_get_unless_zero(&hmm->kref))
38 return hmm;
39
40 return NULL;
41}
42
43/**
44 * hmm_get_or_create - register HMM against an mm (HMM internal)
45 *
46 * @mm: mm struct to attach to
 47 * Return: an HMM object, either by referencing the existing
48 * (per-process) object, or by creating a new one.
49 *
50 * This is not intended to be used directly by device drivers. If mm already
51 * has an HMM struct then it get a reference on it and returns it. Otherwise
52 * it allocates an HMM struct, initializes it, associate it with the mm and
53 * returns it.
54 */
55static struct hmm *hmm_get_or_create(struct mm_struct *mm)
56{
57 struct hmm *hmm = mm_get_hmm(mm);
Jérôme Glissec0b12402017-09-08 16:11:27 -070058 bool cleanup = false;
Jérôme Glisse133ff0e2017-09-08 16:11:23 -070059
Jérôme Glissec0b12402017-09-08 16:11:27 -070060 if (hmm)
61 return hmm;
62
63 hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
64 if (!hmm)
65 return NULL;
Jérôme Glissea3e0d412019-05-13 17:20:01 -070066 init_waitqueue_head(&hmm->wq);
Jérôme Glissec0b12402017-09-08 16:11:27 -070067 INIT_LIST_HEAD(&hmm->mirrors);
68 init_rwsem(&hmm->mirrors_sem);
Jérôme Glissec0b12402017-09-08 16:11:27 -070069 hmm->mmu_notifier.ops = NULL;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -070070 INIT_LIST_HEAD(&hmm->ranges);
Jérôme Glissea3e0d412019-05-13 17:20:01 -070071 mutex_init(&hmm->lock);
Jérôme Glisse704f3f22019-05-13 17:19:48 -070072 kref_init(&hmm->kref);
Jérôme Glissea3e0d412019-05-13 17:20:01 -070073 hmm->notifiers = 0;
74 hmm->dead = false;
Jérôme Glissec0b12402017-09-08 16:11:27 -070075 hmm->mm = mm;
76
Jérôme Glissec0b12402017-09-08 16:11:27 -070077 spin_lock(&mm->page_table_lock);
78 if (!mm->hmm)
79 mm->hmm = hmm;
80 else
81 cleanup = true;
82 spin_unlock(&mm->page_table_lock);
83
Ralph Campbell86a2d592018-10-30 15:04:14 -070084 if (cleanup)
85 goto error;
86
87 /*
 88 * We should only get here if we hold the mmap_sem in write mode, i.e. on
 89 * registration of the first mirror through hmm_mirror_register()
90 */
91 hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
92 if (__mmu_notifier_register(&hmm->mmu_notifier, mm))
93 goto error_mm;
Jérôme Glissec0b12402017-09-08 16:11:27 -070094
Jérôme Glisse704f3f22019-05-13 17:19:48 -070095 return hmm;
Ralph Campbell86a2d592018-10-30 15:04:14 -070096
97error_mm:
98 spin_lock(&mm->page_table_lock);
99 if (mm->hmm == hmm)
100 mm->hmm = NULL;
101 spin_unlock(&mm->page_table_lock);
102error:
103 kfree(hmm);
104 return NULL;
Jérôme Glisse133ff0e2017-09-08 16:11:23 -0700105}
106
Jason Gunthorpe6d7c3cd2019-05-22 16:52:52 -0300107static void hmm_free_rcu(struct rcu_head *rcu)
108{
109 kfree(container_of(rcu, struct hmm, rcu));
110}
111
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700112static void hmm_free(struct kref *kref)
113{
114 struct hmm *hmm = container_of(kref, struct hmm, kref);
115 struct mm_struct *mm = hmm->mm;
116
117 mmu_notifier_unregister_no_release(&hmm->mmu_notifier, mm);
118
119 spin_lock(&mm->page_table_lock);
120 if (mm->hmm == hmm)
121 mm->hmm = NULL;
122 spin_unlock(&mm->page_table_lock);
123
Jason Gunthorpe6d7c3cd2019-05-22 16:52:52 -0300124 mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700125}
126
127static inline void hmm_put(struct hmm *hmm)
128{
129 kref_put(&hmm->kref, hmm_free);
130}
131
Jérôme Glisse133ff0e2017-09-08 16:11:23 -0700132void hmm_mm_destroy(struct mm_struct *mm)
133{
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700134 struct hmm *hmm;
135
136 spin_lock(&mm->page_table_lock);
137 hmm = mm_get_hmm(mm);
138 mm->hmm = NULL;
139 if (hmm) {
140 hmm->mm = NULL;
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700141 hmm->dead = true;
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700142 spin_unlock(&mm->page_table_lock);
143 hmm_put(hmm);
144 return;
145 }
146
147 spin_unlock(&mm->page_table_lock);
Jérôme Glisse133ff0e2017-09-08 16:11:23 -0700148}
Jérôme Glissec0b12402017-09-08 16:11:27 -0700149
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700150static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
Jérôme Glissec0b12402017-09-08 16:11:27 -0700151{
Jason Gunthorpe6d7c3cd2019-05-22 16:52:52 -0300152 struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700153 struct hmm_mirror *mirror;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700154 struct hmm_range *range;
155
Jason Gunthorpe6d7c3cd2019-05-22 16:52:52 -0300156 /* Bail out if hmm is in the process of being freed */
157 if (!kref_get_unless_zero(&hmm->kref))
158 return;
159
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700160 /* Report this HMM as dying. */
161 hmm->dead = true;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700162
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700163 /* Wake-up everyone waiting on any range. */
164 mutex_lock(&hmm->lock);
Ralph Campbell085ea252019-05-06 16:29:39 -0700165 list_for_each_entry(range, &hmm->ranges, list)
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700166 range->valid = false;
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700167 wake_up_all(&hmm->wq);
168 mutex_unlock(&hmm->lock);
Ralph Campbelle1401512018-04-10 16:28:19 -0700169
170 down_write(&hmm->mirrors_sem);
171 mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
172 list);
173 while (mirror) {
174 list_del_init(&mirror->list);
175 if (mirror->ops->release) {
176 /*
Ralph Campbell085ea252019-05-06 16:29:39 -0700177 * Drop mirrors_sem so the release callback can wait
178 * on any pending work that might itself trigger a
179 * mmu_notifier callback and thus would deadlock with
180 * us.
Ralph Campbelle1401512018-04-10 16:28:19 -0700181 */
182 up_write(&hmm->mirrors_sem);
183 mirror->ops->release(mirror);
184 down_write(&hmm->mirrors_sem);
185 }
186 mirror = list_first_entry_or_null(&hmm->mirrors,
187 struct hmm_mirror, list);
188 }
189 up_write(&hmm->mirrors_sem);
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700190
191 hmm_put(hmm);
Ralph Campbelle1401512018-04-10 16:28:19 -0700192}
193
Michal Hocko93065ac2018-08-21 21:52:33 -0700194static int hmm_invalidate_range_start(struct mmu_notifier *mn,
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700195 const struct mmu_notifier_range *nrange)
Jérôme Glissec0b12402017-09-08 16:11:27 -0700196{
Jason Gunthorpe6d7c3cd2019-05-22 16:52:52 -0300197 struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700198 struct hmm_mirror *mirror;
Jérôme Glisseec131b22018-10-30 15:04:28 -0700199 struct hmm_update update;
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700200 struct hmm_range *range;
201 int ret = 0;
Jérôme Glissec0b12402017-09-08 16:11:27 -0700202
Jason Gunthorpe6d7c3cd2019-05-22 16:52:52 -0300203 if (!kref_get_unless_zero(&hmm->kref))
204 return 0;
Jérôme Glissec0b12402017-09-08 16:11:27 -0700205
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700206 update.start = nrange->start;
207 update.end = nrange->end;
Jérôme Glisseec131b22018-10-30 15:04:28 -0700208 update.event = HMM_UPDATE_INVALIDATE;
Jérôme Glissedfcd6662019-05-13 17:20:38 -0700209 update.blockable = mmu_notifier_range_blockable(nrange);
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700210
Jérôme Glissedfcd6662019-05-13 17:20:38 -0700211 if (mmu_notifier_range_blockable(nrange))
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700212 mutex_lock(&hmm->lock);
213 else if (!mutex_trylock(&hmm->lock)) {
214 ret = -EAGAIN;
215 goto out;
216 }
217 hmm->notifiers++;
218 list_for_each_entry(range, &hmm->ranges, list) {
219 if (update.end < range->start || update.start >= range->end)
220 continue;
221
222 range->valid = false;
223 }
224 mutex_unlock(&hmm->lock);
225
Jérôme Glissedfcd6662019-05-13 17:20:38 -0700226 if (mmu_notifier_range_blockable(nrange))
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700227 down_read(&hmm->mirrors_sem);
228 else if (!down_read_trylock(&hmm->mirrors_sem)) {
229 ret = -EAGAIN;
230 goto out;
231 }
232 list_for_each_entry(mirror, &hmm->mirrors, list) {
233 int ret;
234
235 ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
Ralph Campbell085ea252019-05-06 16:29:39 -0700236 if (!update.blockable && ret == -EAGAIN)
237 break;
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700238 }
239 up_read(&hmm->mirrors_sem);
240
241out:
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700242 hmm_put(hmm);
243 return ret;
Jérôme Glissec0b12402017-09-08 16:11:27 -0700244}
245
246static void hmm_invalidate_range_end(struct mmu_notifier *mn,
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700247 const struct mmu_notifier_range *nrange)
Jérôme Glissec0b12402017-09-08 16:11:27 -0700248{
Jason Gunthorpe6d7c3cd2019-05-22 16:52:52 -0300249 struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700250
Jason Gunthorpe6d7c3cd2019-05-22 16:52:52 -0300251 if (!kref_get_unless_zero(&hmm->kref))
252 return;
Jérôme Glissec0b12402017-09-08 16:11:27 -0700253
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700254 mutex_lock(&hmm->lock);
255 hmm->notifiers--;
256 if (!hmm->notifiers) {
257 struct hmm_range *range;
258
259 list_for_each_entry(range, &hmm->ranges, list) {
260 if (range->valid)
261 continue;
262 range->valid = true;
263 }
264 wake_up_all(&hmm->wq);
265 }
266 mutex_unlock(&hmm->lock);
267
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700268 hmm_put(hmm);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700269}
270
271static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
Ralph Campbelle1401512018-04-10 16:28:19 -0700272 .release = hmm_release,
Jérôme Glissec0b12402017-09-08 16:11:27 -0700273 .invalidate_range_start = hmm_invalidate_range_start,
274 .invalidate_range_end = hmm_invalidate_range_end,
275};
276
277/*
278 * hmm_mirror_register() - register a mirror against an mm
279 *
280 * @mirror: new mirror struct to register
281 * @mm: mm to register against
Ralph Campbell085ea252019-05-06 16:29:39 -0700282 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
Jérôme Glissec0b12402017-09-08 16:11:27 -0700283 *
284 * To start mirroring a process address space, the device driver must register
285 * an HMM mirror struct.
286 *
287 * THE mm->mmap_sem MUST BE HELD IN WRITE MODE !
288 */
289int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
290{
291 /* Sanity check */
292 if (!mm || !mirror || !mirror->ops)
293 return -EINVAL;
294
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700295 mirror->hmm = hmm_get_or_create(mm);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700296 if (!mirror->hmm)
297 return -ENOMEM;
298
299 down_write(&mirror->hmm->mirrors_sem);
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700300 list_add(&mirror->list, &mirror->hmm->mirrors);
301 up_write(&mirror->hmm->mirrors_sem);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700302
303 return 0;
304}
305EXPORT_SYMBOL(hmm_mirror_register);
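/*
 * Example usage (illustrative sketch only; my_device, my_driver_sync() and
 * my_driver_release() are hypothetical driver code, not part of this file):
 *
 *	static const struct hmm_mirror_ops my_mirror_ops = {
 *		.sync_cpu_device_pagetables = my_driver_sync,
 *		.release = my_driver_release,
 *	};
 *
 *	down_write(&mm->mmap_sem);
 *	my_device->mirror.ops = &my_mirror_ops;
 *	ret = hmm_mirror_register(&my_device->mirror, mm);
 *	up_write(&mm->mmap_sem);
 *	if (ret)
 *		return ret;
 *	...
 *	hmm_mirror_unregister(&my_device->mirror);
 */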
306
307/*
308 * hmm_mirror_unregister() - unregister a mirror
309 *
Ralph Campbell085ea252019-05-06 16:29:39 -0700310 * @mirror: mirror struct to unregister
Jérôme Glissec0b12402017-09-08 16:11:27 -0700311 *
312 * Stop mirroring a process address space, and cleanup.
313 */
314void hmm_mirror_unregister(struct hmm_mirror *mirror)
315{
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700316 struct hmm *hmm = READ_ONCE(mirror->hmm);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700317
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700318 if (hmm == NULL)
Jérôme Glissec01cbba2018-04-10 16:28:23 -0700319 return;
320
Jérôme Glissec0b12402017-09-08 16:11:27 -0700321 down_write(&hmm->mirrors_sem);
Ralph Campbelle1401512018-04-10 16:28:19 -0700322 list_del_init(&mirror->list);
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700323 /* To protect us against double unregister ... */
Jérôme Glissec01cbba2018-04-10 16:28:23 -0700324 mirror->hmm = NULL;
Jérôme Glissec0b12402017-09-08 16:11:27 -0700325 up_write(&hmm->mirrors_sem);
Jérôme Glissec01cbba2018-04-10 16:28:23 -0700326
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700327 hmm_put(hmm);
Jérôme Glissec0b12402017-09-08 16:11:27 -0700328}
329EXPORT_SYMBOL(hmm_mirror_unregister);
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700330
Jérôme Glisse74eee182017-09-08 16:11:35 -0700331struct hmm_vma_walk {
332 struct hmm_range *range;
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700333 struct dev_pagemap *pgmap;
Jérôme Glisse74eee182017-09-08 16:11:35 -0700334 unsigned long last;
335 bool fault;
336 bool block;
Jérôme Glisse74eee182017-09-08 16:11:35 -0700337};
338
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700339static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
340 bool write_fault, uint64_t *pfn)
Jérôme Glisse74eee182017-09-08 16:11:35 -0700341{
Kuehling, Felix9b1ae602019-05-10 19:53:24 +0000342 unsigned int flags = FAULT_FLAG_REMOTE;
Jérôme Glisse74eee182017-09-08 16:11:35 -0700343 struct hmm_vma_walk *hmm_vma_walk = walk->private;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700344 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glisse74eee182017-09-08 16:11:35 -0700345 struct vm_area_struct *vma = walk->vma;
Souptick Joarder50a7ca32018-08-17 15:44:47 -0700346 vm_fault_t ret;
Jérôme Glisse74eee182017-09-08 16:11:35 -0700347
348 flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700349 flags |= write_fault ? FAULT_FLAG_WRITE : 0;
Souptick Joarder50a7ca32018-08-17 15:44:47 -0700350 ret = handle_mm_fault(vma, addr, flags);
351 if (ret & VM_FAULT_RETRY)
Jérôme Glisse73231612019-05-13 17:19:58 -0700352 return -EAGAIN;
Souptick Joarder50a7ca32018-08-17 15:44:47 -0700353 if (ret & VM_FAULT_ERROR) {
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700354 *pfn = range->values[HMM_PFN_ERROR];
Jérôme Glisse74eee182017-09-08 16:11:35 -0700355 return -EFAULT;
356 }
357
Jérôme Glisse73231612019-05-13 17:19:58 -0700358 return -EBUSY;
Jérôme Glisse74eee182017-09-08 16:11:35 -0700359}
360
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700361static int hmm_pfns_bad(unsigned long addr,
362 unsigned long end,
363 struct mm_walk *walk)
364{
Jérôme Glissec7195472018-04-10 16:28:27 -0700365 struct hmm_vma_walk *hmm_vma_walk = walk->private;
366 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glisseff05c0c2018-04-10 16:28:38 -0700367 uint64_t *pfns = range->pfns;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700368 unsigned long i;
369
370 i = (addr - range->start) >> PAGE_SHIFT;
371 for (; addr < end; addr += PAGE_SIZE, i++)
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700372 pfns[i] = range->values[HMM_PFN_ERROR];
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700373
374 return 0;
375}
376
Jérôme Glisse5504ed22018-04-10 16:28:46 -0700377/*
378 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
379 * @start: range virtual start address (inclusive)
380 * @end: range virtual end address (exclusive)
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700381 * @fault: should we fault or not?
 382 * @write_fault: write fault?
Jérôme Glisse5504ed22018-04-10 16:28:46 -0700383 * @walk: mm_walk structure
Ralph Campbell085ea252019-05-06 16:29:39 -0700384 * Return: 0 on success, -EBUSY after page fault, or page fault error
Jérôme Glisse5504ed22018-04-10 16:28:46 -0700385 *
386 * This function will be called whenever pmd_none() or pte_none() returns true,
387 * or whenever there is no page directory covering the virtual address range.
388 */
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700389static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
390 bool fault, bool write_fault,
391 struct mm_walk *walk)
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700392{
Jérôme Glisse74eee182017-09-08 16:11:35 -0700393 struct hmm_vma_walk *hmm_vma_walk = walk->private;
394 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glisseff05c0c2018-04-10 16:28:38 -0700395 uint64_t *pfns = range->pfns;
Jérôme Glisse63d50662019-05-13 17:20:18 -0700396 unsigned long i, page_size;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700397
Jérôme Glisse74eee182017-09-08 16:11:35 -0700398 hmm_vma_walk->last = addr;
Jérôme Glisse63d50662019-05-13 17:20:18 -0700399 page_size = hmm_range_page_size(range);
400 i = (addr - range->start) >> range->page_shift;
401
402 for (; addr < end; addr += page_size, i++) {
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700403 pfns[i] = range->values[HMM_PFN_NONE];
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700404 if (fault || write_fault) {
Jérôme Glisse74eee182017-09-08 16:11:35 -0700405 int ret;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700406
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700407 ret = hmm_vma_do_fault(walk, addr, write_fault,
408 &pfns[i]);
Jérôme Glisse73231612019-05-13 17:19:58 -0700409 if (ret != -EBUSY)
Jérôme Glisse74eee182017-09-08 16:11:35 -0700410 return ret;
411 }
412 }
413
Jérôme Glisse73231612019-05-13 17:19:58 -0700414 return (fault || write_fault) ? -EBUSY : 0;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700415}
416
417static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
418 uint64_t pfns, uint64_t cpu_flags,
419 bool *fault, bool *write_fault)
420{
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700421 struct hmm_range *range = hmm_vma_walk->range;
422
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700423 if (!hmm_vma_walk->fault)
424 return;
425
Jérôme Glisse023a0192019-05-13 17:20:05 -0700426 /*
 427 * So we not only consider the individual per-page request, we also
 428 * consider the default flags requested for the range. The API can
 429 * be used in 2 fashions. The first one where the HMM user coalesces
 430 * multiple page faults into one request and sets flags per pfn for
 431 * each of those faults. The second one where the HMM user wants to
 432 * pre-fault a range with specific flags. For the latter one it is a
 433 * waste to have the user pre-fill the pfn array with a default
 434 * flags value.
435 */
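	/*
	 * Illustrative sketch of the second fashion (hypothetical driver
	 * code, not from this file): pre-fault a whole range as readable
	 * without pre-filling every entry of the pfns array:
	 *
	 *	range->default_flags = range->flags[HMM_PFN_VALID];
	 *	range->pfn_flags_mask = 0;
	 */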
436 pfns = (pfns & range->pfn_flags_mask) | range->default_flags;
437
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700438 /* We aren't ask to do anything ... */
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700439 if (!(pfns & range->flags[HMM_PFN_VALID]))
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700440 return;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700441 /* If this is device memory then only fault if explicitly requested */
 442 if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
 443 /* Do we fault on device memory? */
444 if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
445 *write_fault = pfns & range->flags[HMM_PFN_WRITE];
446 *fault = true;
447 }
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700448 return;
449 }
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700450
451 /* If CPU page table is not valid then we need to fault */
452 *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
453 /* Need to write fault ? */
454 if ((pfns & range->flags[HMM_PFN_WRITE]) &&
455 !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
456 *write_fault = true;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700457 *fault = true;
458 }
459}
460
461static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
462 const uint64_t *pfns, unsigned long npages,
463 uint64_t cpu_flags, bool *fault,
464 bool *write_fault)
465{
466 unsigned long i;
467
468 if (!hmm_vma_walk->fault) {
469 *fault = *write_fault = false;
470 return;
471 }
472
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700473 *fault = *write_fault = false;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700474 for (i = 0; i < npages; ++i) {
475 hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
476 fault, write_fault);
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700477 if ((*write_fault))
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700478 return;
479 }
480}
481
482static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
483 struct mm_walk *walk)
484{
485 struct hmm_vma_walk *hmm_vma_walk = walk->private;
486 struct hmm_range *range = hmm_vma_walk->range;
487 bool fault, write_fault;
488 unsigned long i, npages;
489 uint64_t *pfns;
490
491 i = (addr - range->start) >> PAGE_SHIFT;
492 npages = (end - addr) >> PAGE_SHIFT;
493 pfns = &range->pfns[i];
494 hmm_range_need_fault(hmm_vma_walk, pfns, npages,
495 0, &fault, &write_fault);
496 return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
497}
498
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700499static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700500{
501 if (pmd_protnone(pmd))
502 return 0;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700503 return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
504 range->flags[HMM_PFN_WRITE] :
505 range->flags[HMM_PFN_VALID];
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700506}
507
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700508static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
509{
510 if (!pud_present(pud))
511 return 0;
512 return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
513 range->flags[HMM_PFN_WRITE] :
514 range->flags[HMM_PFN_VALID];
515}
516
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700517static int hmm_vma_handle_pmd(struct mm_walk *walk,
518 unsigned long addr,
519 unsigned long end,
520 uint64_t *pfns,
521 pmd_t pmd)
522{
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700523#ifdef CONFIG_TRANSPARENT_HUGEPAGE
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700524 struct hmm_vma_walk *hmm_vma_walk = walk->private;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700525 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700526 unsigned long pfn, npages, i;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700527 bool fault, write_fault;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700528 uint64_t cpu_flags;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700529
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700530 npages = (end - addr) >> PAGE_SHIFT;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700531 cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700532 hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
533 &fault, &write_fault);
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700534
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700535 if (pmd_protnone(pmd) || fault || write_fault)
536 return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700537
538 pfn = pmd_pfn(pmd) + pte_index(addr);
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700539 for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
540 if (pmd_devmap(pmd)) {
541 hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
542 hmm_vma_walk->pgmap);
543 if (unlikely(!hmm_vma_walk->pgmap))
544 return -EBUSY;
545 }
Jérôme Glisse391aab12019-05-13 17:20:31 -0700546 pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700547 }
548 if (hmm_vma_walk->pgmap) {
549 put_dev_pagemap(hmm_vma_walk->pgmap);
550 hmm_vma_walk->pgmap = NULL;
551 }
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700552 hmm_vma_walk->last = end;
553 return 0;
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700554#else
 555 /* If THP is not enabled then we should never reach this code! */
556 return -EINVAL;
557#endif
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700558}
559
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700560static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700561{
Philip Yang789c2af2019-05-23 16:32:31 -0400562 if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700563 return 0;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700564 return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
565 range->flags[HMM_PFN_WRITE] :
566 range->flags[HMM_PFN_VALID];
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700567}
568
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700569static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
570 unsigned long end, pmd_t *pmdp, pte_t *ptep,
571 uint64_t *pfn)
572{
573 struct hmm_vma_walk *hmm_vma_walk = walk->private;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700574 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700575 struct vm_area_struct *vma = walk->vma;
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700576 bool fault, write_fault;
577 uint64_t cpu_flags;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700578 pte_t pte = *ptep;
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700579 uint64_t orig_pfn = *pfn;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700580
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700581 *pfn = range->values[HMM_PFN_NONE];
Jérôme Glisse73231612019-05-13 17:19:58 -0700582 fault = write_fault = false;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700583
584 if (pte_none(pte)) {
Jérôme Glisse73231612019-05-13 17:19:58 -0700585 hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
586 &fault, &write_fault);
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700587 if (fault || write_fault)
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700588 goto fault;
589 return 0;
590 }
591
592 if (!pte_present(pte)) {
593 swp_entry_t entry = pte_to_swp_entry(pte);
594
595 if (!non_swap_entry(entry)) {
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700596 if (fault || write_fault)
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700597 goto fault;
598 return 0;
599 }
600
601 /*
 602 * This is a special swap entry: ignore migration, handle
 603 * device private entries and report anything else as an error.
604 */
605 if (is_device_private_entry(entry)) {
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700606 cpu_flags = range->flags[HMM_PFN_VALID] |
607 range->flags[HMM_PFN_DEVICE_PRIVATE];
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700608 cpu_flags |= is_write_device_private_entry(entry) ?
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700609 range->flags[HMM_PFN_WRITE] : 0;
610 hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
611 &fault, &write_fault);
612 if (fault || write_fault)
613 goto fault;
Jérôme Glisse391aab12019-05-13 17:20:31 -0700614 *pfn = hmm_device_entry_from_pfn(range,
615 swp_offset(entry));
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700616 *pfn |= cpu_flags;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700617 return 0;
618 }
619
620 if (is_migration_entry(entry)) {
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700621 if (fault || write_fault) {
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700622 pte_unmap(ptep);
623 hmm_vma_walk->last = addr;
624 migration_entry_wait(vma->vm_mm,
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700625 pmdp, addr);
Jérôme Glisse73231612019-05-13 17:19:58 -0700626 return -EBUSY;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700627 }
628 return 0;
629 }
630
631 /* Report error for everything else */
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700632 *pfn = range->values[HMM_PFN_ERROR];
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700633 return -EFAULT;
Jérôme Glisse73231612019-05-13 17:19:58 -0700634 } else {
635 cpu_flags = pte_to_hmm_pfn_flags(range, pte);
636 hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
637 &fault, &write_fault);
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700638 }
639
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700640 if (fault || write_fault)
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700641 goto fault;
642
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700643 if (pte_devmap(pte)) {
644 hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
645 hmm_vma_walk->pgmap);
646 if (unlikely(!hmm_vma_walk->pgmap))
647 return -EBUSY;
648 } else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
649 *pfn = range->values[HMM_PFN_SPECIAL];
650 return -EFAULT;
651 }
652
Jérôme Glisse391aab12019-05-13 17:20:31 -0700653 *pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700654 return 0;
655
656fault:
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700657 if (hmm_vma_walk->pgmap) {
658 put_dev_pagemap(hmm_vma_walk->pgmap);
659 hmm_vma_walk->pgmap = NULL;
660 }
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700661 pte_unmap(ptep);
662 /* Fault any virtual address we were asked to fault */
Jérôme Glisse2aee09d2018-04-10 16:29:02 -0700663 return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700664}
665
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700666static int hmm_vma_walk_pmd(pmd_t *pmdp,
667 unsigned long start,
668 unsigned long end,
669 struct mm_walk *walk)
670{
Jérôme Glisse74eee182017-09-08 16:11:35 -0700671 struct hmm_vma_walk *hmm_vma_walk = walk->private;
672 struct hmm_range *range = hmm_vma_walk->range;
Jérôme Glissed08faca2018-10-30 15:04:20 -0700673 struct vm_area_struct *vma = walk->vma;
Jérôme Glisseff05c0c2018-04-10 16:28:38 -0700674 uint64_t *pfns = range->pfns;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700675 unsigned long addr = start, i;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700676 pte_t *ptep;
Jérôme Glissed08faca2018-10-30 15:04:20 -0700677 pmd_t pmd;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700678
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700679
680again:
Jérôme Glissed08faca2018-10-30 15:04:20 -0700681 pmd = READ_ONCE(*pmdp);
682 if (pmd_none(pmd))
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700683 return hmm_vma_walk_hole(start, end, walk);
684
Jérôme Glissed08faca2018-10-30 15:04:20 -0700685 if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700686 return hmm_pfns_bad(start, end, walk);
687
Jérôme Glissed08faca2018-10-30 15:04:20 -0700688 if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
689 bool fault, write_fault;
690 unsigned long npages;
691 uint64_t *pfns;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700692
Jérôme Glissed08faca2018-10-30 15:04:20 -0700693 i = (addr - range->start) >> PAGE_SHIFT;
694 npages = (end - addr) >> PAGE_SHIFT;
695 pfns = &range->pfns[i];
696
697 hmm_range_need_fault(hmm_vma_walk, pfns, npages,
698 0, &fault, &write_fault);
699 if (fault || write_fault) {
700 hmm_vma_walk->last = addr;
701 pmd_migration_entry_wait(vma->vm_mm, pmdp);
Jérôme Glisse73231612019-05-13 17:19:58 -0700702 return -EBUSY;
Jérôme Glissed08faca2018-10-30 15:04:20 -0700703 }
704 return 0;
705 } else if (!pmd_present(pmd))
706 return hmm_pfns_bad(start, end, walk);
707
708 if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700709 /*
 710 * No need to take the pmd_lock here: even if some other thread
 711 * is splitting the huge pmd we will get that event through the
 712 * mmu_notifier callback.
 713 *
 714 * So just read the pmd value and check again that it is a transparent
 715 * huge or device mapping one and compute the corresponding pfn
 716 * values.
717 */
718 pmd = pmd_read_atomic(pmdp);
719 barrier();
720 if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
721 goto again;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700722
Jérôme Glissed08faca2018-10-30 15:04:20 -0700723 i = (addr - range->start) >> PAGE_SHIFT;
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700724 return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700725 }
726
Jérôme Glissed08faca2018-10-30 15:04:20 -0700727 /*
 728 * We have handled all the valid cases above, i.e. either none, migration,
 729 * huge or transparent huge. At this point either it is a valid pmd
 730 * entry pointing to a pte directory or it is a bad pmd that will not
 731 * recover.
732 */
733 if (pmd_bad(pmd))
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700734 return hmm_pfns_bad(start, end, walk);
735
736 ptep = pte_offset_map(pmdp, addr);
Jérôme Glissed08faca2018-10-30 15:04:20 -0700737 i = (addr - range->start) >> PAGE_SHIFT;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700738 for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700739 int r;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700740
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700741 r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
742 if (r) {
743 /* hmm_vma_handle_pte() did unmap pte directory */
744 hmm_vma_walk->last = addr;
745 return r;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700746 }
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700747 }
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700748 if (hmm_vma_walk->pgmap) {
749 /*
750 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
 751 * so that we can leverage the get_dev_pagemap() optimization, which
752 * will not re-take a reference on a pgmap if we already have
753 * one.
754 */
755 put_dev_pagemap(hmm_vma_walk->pgmap);
756 hmm_vma_walk->pgmap = NULL;
757 }
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700758 pte_unmap(ptep - 1);
759
Jérôme Glisse53f5c3f2018-04-10 16:28:59 -0700760 hmm_vma_walk->last = addr;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700761 return 0;
762}
763
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700764static int hmm_vma_walk_pud(pud_t *pudp,
765 unsigned long start,
766 unsigned long end,
767 struct mm_walk *walk)
768{
769 struct hmm_vma_walk *hmm_vma_walk = walk->private;
770 struct hmm_range *range = hmm_vma_walk->range;
771 unsigned long addr = start, next;
772 pmd_t *pmdp;
773 pud_t pud;
774 int ret;
775
776again:
777 pud = READ_ONCE(*pudp);
778 if (pud_none(pud))
779 return hmm_vma_walk_hole(start, end, walk);
780
781 if (pud_huge(pud) && pud_devmap(pud)) {
782 unsigned long i, npages, pfn;
783 uint64_t *pfns, cpu_flags;
784 bool fault, write_fault;
785
786 if (!pud_present(pud))
787 return hmm_vma_walk_hole(start, end, walk);
788
789 i = (addr - range->start) >> PAGE_SHIFT;
790 npages = (end - addr) >> PAGE_SHIFT;
791 pfns = &range->pfns[i];
792
793 cpu_flags = pud_to_hmm_pfn_flags(range, pud);
794 hmm_range_need_fault(hmm_vma_walk, pfns, npages,
795 cpu_flags, &fault, &write_fault);
796 if (fault || write_fault)
797 return hmm_vma_walk_hole_(addr, end, fault,
798 write_fault, walk);
799
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700800 pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
801 for (i = 0; i < npages; ++i, ++pfn) {
802 hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
803 hmm_vma_walk->pgmap);
804 if (unlikely(!hmm_vma_walk->pgmap))
805 return -EBUSY;
Jérôme Glisse391aab12019-05-13 17:20:31 -0700806 pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
807 cpu_flags;
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700808 }
809 if (hmm_vma_walk->pgmap) {
810 put_dev_pagemap(hmm_vma_walk->pgmap);
811 hmm_vma_walk->pgmap = NULL;
812 }
813 hmm_vma_walk->last = end;
814 return 0;
Jérôme Glisse992de9a2019-05-13 17:20:21 -0700815 }
816
817 split_huge_pud(walk->vma, pudp, addr);
818 if (pud_none(*pudp))
819 goto again;
820
821 pmdp = pmd_offset(pudp, addr);
822 do {
823 next = pmd_addr_end(addr, end);
824 ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
825 if (ret)
826 return ret;
827 } while (pmdp++, addr = next, addr != end);
828
829 return 0;
830}
831
Jérôme Glisse63d50662019-05-13 17:20:18 -0700832static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
833 unsigned long start, unsigned long end,
834 struct mm_walk *walk)
835{
836#ifdef CONFIG_HUGETLB_PAGE
837 unsigned long addr = start, i, pfn, mask, size, pfn_inc;
838 struct hmm_vma_walk *hmm_vma_walk = walk->private;
839 struct hmm_range *range = hmm_vma_walk->range;
840 struct vm_area_struct *vma = walk->vma;
841 struct hstate *h = hstate_vma(vma);
842 uint64_t orig_pfn, cpu_flags;
843 bool fault, write_fault;
844 spinlock_t *ptl;
845 pte_t entry;
846 int ret = 0;
847
848 size = 1UL << huge_page_shift(h);
849 mask = size - 1;
850 if (range->page_shift != PAGE_SHIFT) {
 851 /* Make sure we are looking at a full page. */
852 if (start & mask)
853 return -EINVAL;
854 if (end < (start + size))
855 return -EINVAL;
856 pfn_inc = size >> PAGE_SHIFT;
857 } else {
858 pfn_inc = 1;
859 size = PAGE_SIZE;
860 }
861
862
863 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
864 entry = huge_ptep_get(pte);
865
866 i = (start - range->start) >> range->page_shift;
867 orig_pfn = range->pfns[i];
868 range->pfns[i] = range->values[HMM_PFN_NONE];
869 cpu_flags = pte_to_hmm_pfn_flags(range, entry);
870 fault = write_fault = false;
871 hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
872 &fault, &write_fault);
873 if (fault || write_fault) {
874 ret = -ENOENT;
875 goto unlock;
876 }
877
878 pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
879 for (; addr < end; addr += size, i++, pfn += pfn_inc)
Jérôme Glisse391aab12019-05-13 17:20:31 -0700880 range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
881 cpu_flags;
Jérôme Glisse63d50662019-05-13 17:20:18 -0700882 hmm_vma_walk->last = end;
883
884unlock:
885 spin_unlock(ptl);
886
887 if (ret == -ENOENT)
888 return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
889
890 return ret;
891#else /* CONFIG_HUGETLB_PAGE */
892 return -EINVAL;
893#endif
894}
895
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700896static void hmm_pfns_clear(struct hmm_range *range,
897 uint64_t *pfns,
Jérôme Glisse33cd47d2018-04-10 16:28:54 -0700898 unsigned long addr,
899 unsigned long end)
900{
901 for (; addr < end; addr += PAGE_SIZE, pfns++)
Jérôme Glissef88a1e92018-04-10 16:29:06 -0700902 *pfns = range->values[HMM_PFN_NONE];
Jérôme Glisse33cd47d2018-04-10 16:28:54 -0700903}
904
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700905/*
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700906 * hmm_range_register() - start tracking changes to the CPU page table over a range
Jérôme Glisse25f23a02019-05-13 17:19:55 -0700907 * @range: range
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700908 * @mirror: the HMM mirror (and thus mm) this range is registered against
 909 * @start: start virtual address (inclusive)
 910 * @end: end virtual address (exclusive)
Jérôme Glisse63d50662019-05-13 17:20:18 -0700911 * @page_shift: expected page shift for the range
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700912 * Return: 0 on success, -EFAULT if the address space is no longer valid
Jérôme Glisse25f23a02019-05-13 17:19:55 -0700913 *
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700914 * Track updates to the CPU page table, see include/linux/hmm.h for details.
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700915 */
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700916int hmm_range_register(struct hmm_range *range,
Jason Gunthorpee36acfe2019-05-23 09:41:19 -0300917 struct hmm_mirror *mirror,
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700918 unsigned long start,
Jérôme Glisse63d50662019-05-13 17:20:18 -0700919 unsigned long end,
920 unsigned page_shift)
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700921{
Jérôme Glisse63d50662019-05-13 17:20:18 -0700922 unsigned long mask = ((1UL << page_shift) - 1UL);
Jason Gunthorpee36acfe2019-05-23 09:41:19 -0300923 struct hmm *hmm = mirror->hmm;
Jérôme Glisse63d50662019-05-13 17:20:18 -0700924
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700925 range->valid = false;
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700926 range->hmm = NULL;
927
Jérôme Glisse63d50662019-05-13 17:20:18 -0700928 if ((start & mask) || (end & mask))
929 return -EINVAL;
930 if (start >= end)
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700931 return -EINVAL;
932
Jérôme Glisse63d50662019-05-13 17:20:18 -0700933 range->page_shift = page_shift;
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700934 range->start = start;
935 range->end = end;
936
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700937 /* Check if hmm_mm_destroy() was called. */
Jason Gunthorpee36acfe2019-05-23 09:41:19 -0300938 if (hmm->mm == NULL || hmm->dead)
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700939 return -EFAULT;
Jérôme Glisse86586a42018-04-10 16:28:34 -0700940
Ralph Campbell085ea252019-05-06 16:29:39 -0700941 /* Initialize range to track CPU page table updates. */
942 mutex_lock(&hmm->lock);
Jérôme Glisseda4c3c72017-09-08 16:11:31 -0700943
Ralph Campbell085ea252019-05-06 16:29:39 -0700944 range->hmm = hmm;
Jason Gunthorpee36acfe2019-05-23 09:41:19 -0300945 kref_get(&hmm->kref);
Ralph Campbell085ea252019-05-06 16:29:39 -0700946 list_add_rcu(&range->list, &hmm->ranges);
Jérôme Glisse74eee182017-09-08 16:11:35 -0700947
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700948 /*
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700949 * If there are any concurrent notifiers we have to wait for them for
950 * the range to be valid (see hmm_range_wait_until_valid()).
Jérôme Glisse704f3f22019-05-13 17:19:48 -0700951 */
Ralph Campbell085ea252019-05-06 16:29:39 -0700952 if (!hmm->notifiers)
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700953 range->valid = true;
Ralph Campbell085ea252019-05-06 16:29:39 -0700954 mutex_unlock(&hmm->lock);
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700955
956 return 0;
957}
958EXPORT_SYMBOL(hmm_range_register);
959
960/*
 961 * hmm_range_unregister() - stop tracking changes to the CPU page table over a range
962 * @range: range
963 *
964 * Range struct is used to track updates to the CPU page table after a call to
965 * hmm_range_register(). See include/linux/hmm.h for how to use it.
966 */
967void hmm_range_unregister(struct hmm_range *range)
968{
Ralph Campbell085ea252019-05-06 16:29:39 -0700969 struct hmm *hmm = range->hmm;
970
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700971 /* Sanity check this really should not happen. */
Ralph Campbell085ea252019-05-06 16:29:39 -0700972 if (hmm == NULL || range->end <= range->start)
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700973 return;
974
Ralph Campbell085ea252019-05-06 16:29:39 -0700975 mutex_lock(&hmm->lock);
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700976 list_del_rcu(&range->list);
Ralph Campbell085ea252019-05-06 16:29:39 -0700977 mutex_unlock(&hmm->lock);
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700978
979 /* Drop reference taken by hmm_range_register() */
980 range->valid = false;
Ralph Campbell085ea252019-05-06 16:29:39 -0700981 hmm_put(hmm);
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700982 range->hmm = NULL;
983}
984EXPORT_SYMBOL(hmm_range_unregister);
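/*
 * Typical register/unregister lifecycle (illustrative sketch only; the
 * driver-side names are hypothetical and error handling is elided, see
 * include/linux/hmm.h for the authoritative usage pattern):
 *
 *	range.pfns = pfns;
 *	range.flags = my_driver_hmm_flags;
 *	range.values = my_driver_hmm_values;
 *
 *	ret = hmm_range_register(&range, &my_device->mirror, start, end,
 *				 PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *
 *	Snapshot or fault the range (see hmm_range_snapshot() and
 *	hmm_range_fault() below), then:
 *
 *	hmm_range_unregister(&range);
 */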
985
986/*
987 * hmm_range_snapshot() - snapshot CPU page table for a range
988 * @range: range
Ralph Campbell085ea252019-05-06 16:29:39 -0700989 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
Jérôme Glissea3e0d412019-05-13 17:20:01 -0700990 * permission (for instance asking for write and range is read only),
 991 * -EAGAIN if you need to retry, -EFAULT invalid (i.e. either no valid
992 * vma or it is illegal to access that range), number of valid pages
993 * in range->pfns[] (from range start address).
994 *
995 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
996 * validity is tracked by range struct. See in include/linux/hmm.h for example
997 * on how to use.
998 */
999long hmm_range_snapshot(struct hmm_range *range)
1000{
Jérôme Glisse63d50662019-05-13 17:20:18 -07001001 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001002 unsigned long start = range->start, end;
1003 struct hmm_vma_walk hmm_vma_walk;
1004 struct hmm *hmm = range->hmm;
1005 struct vm_area_struct *vma;
1006 struct mm_walk mm_walk;
1007
 1008 /* Check if hmm_mm_destroy() was called. */
1009 if (hmm->mm == NULL || hmm->dead)
1010 return -EFAULT;
1011
1012 do {
1013 /* If range is no longer valid force retry. */
1014 if (!range->valid)
1015 return -EAGAIN;
1016
1017 vma = find_vma(hmm->mm, start);
Jérôme Glisse63d50662019-05-13 17:20:18 -07001018 if (vma == NULL || (vma->vm_flags & device_vma))
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001019 return -EFAULT;
1020
Jérôme Glisse63d50662019-05-13 17:20:18 -07001021 if (is_vm_hugetlb_page(vma)) {
Jason Gunthorpe1c2308f02019-05-27 17:02:21 -03001022 if (huge_page_shift(hstate_vma(vma)) !=
1023 range->page_shift &&
Jérôme Glisse63d50662019-05-13 17:20:18 -07001024 range->page_shift != PAGE_SHIFT)
1025 return -EINVAL;
1026 } else {
1027 if (range->page_shift != PAGE_SHIFT)
1028 return -EINVAL;
1029 }
1030
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001031 if (!(vma->vm_flags & VM_READ)) {
1032 /*
1033 * If the vma does not allow read access, then assume that it
1034 * does not allow write access either. HMM does not
1035 * support architectures that allow write without read.
1036 */
1037 hmm_pfns_clear(range, range->pfns,
1038 range->start, range->end);
1039 return -EPERM;
1040 }
1041
1042 range->vma = vma;
Jérôme Glisse992de9a2019-05-13 17:20:21 -07001043 hmm_vma_walk.pgmap = NULL;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001044 hmm_vma_walk.last = start;
1045 hmm_vma_walk.fault = false;
1046 hmm_vma_walk.range = range;
1047 mm_walk.private = &hmm_vma_walk;
1048 end = min(range->end, vma->vm_end);
1049
1050 mm_walk.vma = vma;
1051 mm_walk.mm = vma->vm_mm;
1052 mm_walk.pte_entry = NULL;
1053 mm_walk.test_walk = NULL;
1054 mm_walk.hugetlb_entry = NULL;
Jérôme Glisse992de9a2019-05-13 17:20:21 -07001055 mm_walk.pud_entry = hmm_vma_walk_pud;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001056 mm_walk.pmd_entry = hmm_vma_walk_pmd;
1057 mm_walk.pte_hole = hmm_vma_walk_hole;
Jérôme Glisse63d50662019-05-13 17:20:18 -07001058 mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001059
1060 walk_page_range(start, end, &mm_walk);
1061 start = end;
1062 } while (start < range->end);
1063
Jérôme Glisse25f23a02019-05-13 17:19:55 -07001064 return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
Jérôme Glisseda4c3c72017-09-08 16:11:31 -07001065}
Jérôme Glisse25f23a02019-05-13 17:19:55 -07001066EXPORT_SYMBOL(hmm_range_snapshot);
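/*
 * Illustrative snapshot sketch (hypothetical driver code, not from this
 * file; hmm_range_wait_until_valid() is declared in include/linux/hmm.h):
 *
 *	hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
 * again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	if (ret == -EAGAIN) {
 *		up_read(&mm->mmap_sem);
 *		hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
 *		goto again;
 *	}
 */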
Jérôme Glisseda4c3c72017-09-08 16:11:31 -07001067
1068/*
Jérôme Glisse73231612019-05-13 17:19:58 -07001069 * hmm_range_fault() - try to fault some address in a virtual address range
Jérôme Glisse08232a42018-04-10 16:28:30 -07001070 * @range: range being faulted
Jérôme Glisse74eee182017-09-08 16:11:35 -07001071 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
Ralph Campbell085ea252019-05-06 16:29:39 -07001072 * Return: number of valid pages in range->pfns[] (from range start
Jérôme Glisse73231612019-05-13 17:19:58 -07001073 * address). This may be zero. If the return value is negative,
1074 * then one of the following values may be returned:
1075 *
 1076 * -EINVAL: invalid arguments or mm or virtual address are in an
Jérôme Glisse63d50662019-05-13 17:20:18 -07001077 * invalid vma (for instance device file vma).
Jérôme Glisse73231612019-05-13 17:19:58 -07001078 * -ENOMEM: Out of memory.
1079 * -EPERM: Invalid permission (for instance asking for write and
1080 * range is read only).
 1081 * -EAGAIN: If you need to retry and mmap_sem was dropped. This can only
 1082 * happen if the block argument is false.
 1083 * -EBUSY: If the range is being invalidated and you should wait
 1084 * for invalidation to finish.
 1085 * -EFAULT: Invalid (i.e. either no valid vma or it is illegal to access
1086 * that range), number of valid pages in range->pfns[] (from
1087 * range start address).
Jérôme Glisse74eee182017-09-08 16:11:35 -07001088 *
1089 * This is similar to a regular CPU page fault except that it will not trigger
Jérôme Glisse73231612019-05-13 17:19:58 -07001090 * any memory migration if the memory being faulted is not accessible by CPUs
 1091 * and the caller does not ask for migration.
Jérôme Glisse74eee182017-09-08 16:11:35 -07001092 *
Jérôme Glisseff05c0c2018-04-10 16:28:38 -07001093 * On error, for one virtual address in the range, the function will mark the
1094 * corresponding HMM pfn entry with an error flag.
Jérôme Glisse74eee182017-09-08 16:11:35 -07001095 */
Jérôme Glisse73231612019-05-13 17:19:58 -07001096long hmm_range_fault(struct hmm_range *range, bool block)
Jérôme Glisse74eee182017-09-08 16:11:35 -07001097{
Jérôme Glisse63d50662019-05-13 17:20:18 -07001098 const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001099 unsigned long start = range->start, end;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001100 struct hmm_vma_walk hmm_vma_walk;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001101 struct hmm *hmm = range->hmm;
1102 struct vm_area_struct *vma;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001103 struct mm_walk mm_walk;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001104 int ret;
1105
Jérôme Glisse704f3f22019-05-13 17:19:48 -07001106 /* Check if hmm_mm_destroy() was called. */
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001107 if (hmm->mm == NULL || hmm->dead)
1108 return -EFAULT;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001109
1110 do {
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001111 /* If range is no longer valid force retry. */
1112 if (!range->valid) {
1113 up_read(&hmm->mm->mmap_sem);
1114 return -EAGAIN;
1115 }
Jérôme Glisse74eee182017-09-08 16:11:35 -07001116
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001117 vma = find_vma(hmm->mm, start);
Jérôme Glisse63d50662019-05-13 17:20:18 -07001118 if (vma == NULL || (vma->vm_flags & device_vma))
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001119 return -EFAULT;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001120
Jérôme Glisse63d50662019-05-13 17:20:18 -07001121 if (is_vm_hugetlb_page(vma)) {
1122 if (huge_page_shift(hstate_vma(vma)) !=
1123 range->page_shift &&
1124 range->page_shift != PAGE_SHIFT)
1125 return -EINVAL;
1126 } else {
1127 if (range->page_shift != PAGE_SHIFT)
1128 return -EINVAL;
1129 }
1130
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001131 if (!(vma->vm_flags & VM_READ)) {
1132 /*
1133 * If vma do not allow read access, then assume that it
1134 * does not allow write access, either. HMM does not
1135 * support architecture that allow write without read.
1136 */
1137 hmm_pfns_clear(range, range->pfns,
1138 range->start, range->end);
1139 return -EPERM;
1140 }
1141
1142 range->vma = vma;
Jérôme Glisse992de9a2019-05-13 17:20:21 -07001143 hmm_vma_walk.pgmap = NULL;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001144 hmm_vma_walk.last = start;
1145 hmm_vma_walk.fault = true;
1146 hmm_vma_walk.block = block;
1147 hmm_vma_walk.range = range;
1148 mm_walk.private = &hmm_vma_walk;
1149 end = min(range->end, vma->vm_end);
1150
1151 mm_walk.vma = vma;
1152 mm_walk.mm = vma->vm_mm;
1153 mm_walk.pte_entry = NULL;
1154 mm_walk.test_walk = NULL;
1155 mm_walk.hugetlb_entry = NULL;
Jérôme Glisse992de9a2019-05-13 17:20:21 -07001156 mm_walk.pud_entry = hmm_vma_walk_pud;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001157 mm_walk.pmd_entry = hmm_vma_walk_pmd;
1158 mm_walk.pte_hole = hmm_vma_walk_hole;
Jérôme Glisse63d50662019-05-13 17:20:18 -07001159 mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;
Jérôme Glissea3e0d412019-05-13 17:20:01 -07001160
1161 do {
1162 ret = walk_page_range(start, end, &mm_walk);
1163 start = hmm_vma_walk.last;
1164
1165 /* Keep trying while the range is valid. */
1166 } while (ret == -EBUSY && range->valid);
1167
1168 if (ret) {
1169 unsigned long i;
1170
1171 i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
1172 hmm_pfns_clear(range, &range->pfns[i],
1173 hmm_vma_walk.last, range->end);
1174 return ret;
1175 }
1176 start = end;
1177
1178 } while (start < range->end);
Jérôme Glisse704f3f22019-05-13 17:19:48 -07001179
Jérôme Glisse73231612019-05-13 17:19:58 -07001180 return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
Jérôme Glisse74eee182017-09-08 16:11:35 -07001181}
Jérôme Glisse73231612019-05-13 17:19:58 -07001182EXPORT_SYMBOL(hmm_range_fault);
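/*
 * Illustrative fault retry loop (hypothetical driver code, not from this
 * file; hmm_range_wait_until_valid() is declared in include/linux/hmm.h):
 *
 *	for (;;) {
 *		hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_fault(&range, true);
 *		if (ret >= 0)
 *			break;
 *		if (ret != -EAGAIN) {
 *			up_read(&mm->mmap_sem);
 *			hmm_range_unregister(&range);
 *			return ret;
 *		}
 *		On -EAGAIN hmm_range_fault() already dropped mmap_sem,
 *		so just wait and retry.
 *	}
 *	mmap_sem is still held here; use range.pfns[] to fill the device
 *	page table, then up_read() and hmm_range_unregister().
 */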
Jérôme Glisse55c0ece2019-05-13 17:20:28 -07001183
1184/**
 1185 * hmm_range_dma_map() - hmm_range_fault() and dma map pages all in one.
 1186 * @range: range being faulted
 1187 * @device: device against which to dma map the pages
 1188 * @daddrs: dma address of mapped pages
 1189 * @block: allow blocking on fault (if true it sleeps and does not drop mmap_sem)
Ralph Campbell085ea252019-05-06 16:29:39 -07001190 * Return: number of pages mapped on success, -EAGAIN if mmap_sem has been
Jérôme Glisse55c0ece2019-05-13 17:20:28 -07001191 * dropped and you need to try again, some other error value otherwise
1192 *
1193 * Note same usage pattern as hmm_range_fault().
1194 */
1195long hmm_range_dma_map(struct hmm_range *range,
1196 struct device *device,
1197 dma_addr_t *daddrs,
1198 bool block)
1199{
1200 unsigned long i, npages, mapped;
1201 long ret;
1202
1203 ret = hmm_range_fault(range, block);
1204 if (ret <= 0)
1205 return ret ? ret : -EBUSY;
1206
1207 npages = (range->end - range->start) >> PAGE_SHIFT;
1208 for (i = 0, mapped = 0; i < npages; ++i) {
1209 enum dma_data_direction dir = DMA_TO_DEVICE;
1210 struct page *page;
1211
1212 /*
1213 * FIXME need to update DMA API to provide invalid DMA address
1214 * value instead of a function to test dma address value. This
 1215 * would remove a lot of code duplicated across many architectures.
 1216 *
 1217 * For now setting it to 0 here is good enough as the pfns[]
 1218 * value is what is used to check what is valid and what isn't.
1219 */
1220 daddrs[i] = 0;
1221
Jérôme Glisse391aab12019-05-13 17:20:31 -07001222 page = hmm_device_entry_to_page(range, range->pfns[i]);
Jérôme Glisse55c0ece2019-05-13 17:20:28 -07001223 if (page == NULL)
1224 continue;
1225
1226 /* Check if range is being invalidated */
1227 if (!range->valid) {
1228 ret = -EBUSY;
1229 goto unmap;
1230 }
1231
 1232 /* If it is read and write then map bi-directional. */
1233 if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
1234 dir = DMA_BIDIRECTIONAL;
1235
1236 daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
1237 if (dma_mapping_error(device, daddrs[i])) {
1238 ret = -EFAULT;
1239 goto unmap;
1240 }
1241
1242 mapped++;
1243 }
1244
1245 return mapped;
1246
1247unmap:
1248 for (npages = i, i = 0; (i < npages) && mapped; ++i) {
1249 enum dma_data_direction dir = DMA_TO_DEVICE;
1250 struct page *page;
1251
Jérôme Glisse391aab12019-05-13 17:20:31 -07001252 page = hmm_device_entry_to_page(range, range->pfns[i]);
Jérôme Glisse55c0ece2019-05-13 17:20:28 -07001253 if (page == NULL)
1254 continue;
1255
1256 if (dma_mapping_error(device, daddrs[i]))
1257 continue;
1258
 1259 /* If it is read and write then map bi-directional. */
1260 if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
1261 dir = DMA_BIDIRECTIONAL;
1262
1263 dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
1264 mapped--;
1265 }
1266
1267 return ret;
1268}
1269EXPORT_SYMBOL(hmm_range_dma_map);
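/*
 * Illustrative sketch (hypothetical driver code, not from this file):
 * daddrs[] needs one entry per page of the range.
 *
 *	npages = (range.end - range.start) >> PAGE_SHIFT;
 *	daddrs = kmalloc_array(npages, sizeof(*daddrs), GFP_KERNEL);
 *	if (!daddrs)
 *		return -ENOMEM;
 *
 *	down_read(&mm->mmap_sem);
 *	mapped = hmm_range_dma_map(&range, my_device->dev, daddrs, true);
 *	if (mapped < 0) {
 *		Same error convention as hmm_range_fault(): on -EAGAIN the
 *		mmap_sem was already dropped, otherwise drop it here.
 *	}
 *	...
 *	hmm_range_dma_unmap(&range, NULL, my_device->dev, daddrs, dirty);
 */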
1270
1271/**
 1272 * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
1273 * @range: range being unmapped
 1274 * @vma: the vma against which the range was mapped (optional)
1275 * @device: device against which dma map was done
1276 * @daddrs: dma address of mapped pages
1277 * @dirty: dirty page if it had the write flag set
Ralph Campbell085ea252019-05-06 16:29:39 -07001278 * Return: number of pages unmapped on success, -EINVAL otherwise
Jérôme Glisse55c0ece2019-05-13 17:20:28 -07001279 *
 1280 * Note that the caller MUST abide by the mmu notifier or use an HMM mirror
 1281 * and the sync_cpu_device_pagetables() callback so that it is safe here to
 1282 * call set_page_dirty(). The caller must also take appropriate locks to
 1283 * prevent a concurrent mmu notifier or sync_cpu_device_pagetables() from making progress.
1284 */
1285long hmm_range_dma_unmap(struct hmm_range *range,
1286 struct vm_area_struct *vma,
1287 struct device *device,
1288 dma_addr_t *daddrs,
1289 bool dirty)
1290{
1291 unsigned long i, npages;
1292 long cpages = 0;
1293
1294 /* Sanity check. */
1295 if (range->end <= range->start)
1296 return -EINVAL;
1297 if (!daddrs)
1298 return -EINVAL;
1299 if (!range->pfns)
1300 return -EINVAL;
1301
1302 npages = (range->end - range->start) >> PAGE_SHIFT;
1303 for (i = 0; i < npages; ++i) {
1304 enum dma_data_direction dir = DMA_TO_DEVICE;
1305 struct page *page;
1306
Jérôme Glisse391aab12019-05-13 17:20:31 -07001307 page = hmm_device_entry_to_page(range, range->pfns[i]);
Jérôme Glisse55c0ece2019-05-13 17:20:28 -07001308 if (page == NULL)
1309 continue;
1310
1311 /* If it is read and write then it was mapped bi-directional. */
1312 if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
1313 dir = DMA_BIDIRECTIONAL;
1314
1315 /*
1316 * See comments in function description on why it is
1317 * safe here to call set_page_dirty()
1318 */
1319 if (dirty)
1320 set_page_dirty(page);
1321 }
1322
1323 /* Unmap and clear pfns/dma address */
1324 dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
1325 range->pfns[i] = range->values[HMM_PFN_NONE];
1326 /* FIXME see comments in hmm_range_dma_map() */
1327 daddrs[i] = 0;
1328 cpages++;
1329 }
1330
1331 return cpages;
1332}
1333EXPORT_SYMBOL(hmm_range_dma_unmap);
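
/*
 * Illustrative teardown sketch, not part of the original file: undoing a
 * mapping created by hmm_range_dma_map(). Serializing against concurrent
 * invalidation (see the comment above hmm_range_dma_unmap()) is assumed to
 * be handled by the caller and is not shown here.
 */
static void example_range_dma_unmap(struct hmm_range *range,
				    struct device *dev,
				    dma_addr_t *daddrs)
{
	long cpages;

	/* dirty=true so pages mapped with the write flag get set_page_dirty(). */
	cpages = hmm_range_dma_unmap(range, NULL, dev, daddrs, true);
	if (cpages < 0)
		pr_err("example: hmm_range_dma_unmap() failed: %ld\n", cpages);
}
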
Jérôme Glissec0b12402017-09-08 16:11:27 -07001334#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001335
1336
Jérôme Glissedf6ad692017-09-08 16:12:24 -07001337#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001338struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
1339 unsigned long addr)
1340{
1341 struct page *page;
1342
1343 page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
1344 if (!page)
1345 return NULL;
1346 lock_page(page);
1347 return page;
1348}
1349EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
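
/*
 * Illustrative sketch, not part of the original file: a driver path that
 * allocates a locked system page at a faulting address, typically to copy
 * device-private memory back into it. The copy itself is driver specific and
 * only hinted at; the caller remains responsible for unlock_page() and
 * put_page() once it is done with the page.
 */
static struct page *example_alloc_for_copy(struct vm_area_struct *vma,
					   unsigned long addr)
{
	struct page *page;

	page = hmm_vma_alloc_locked_page(vma, addr);
	if (!page)
		return NULL;

	/* ... copy the device memory backing @addr into @page here ... */

	return page;
}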
1350
1351
1352static void hmm_devmem_ref_release(struct percpu_ref *ref)
1353{
1354 struct hmm_devmem *devmem;
1355
1356 devmem = container_of(ref, struct hmm_devmem, ref);
1357 complete(&devmem->completion);
1358}
1359
1360static void hmm_devmem_ref_exit(void *data)
1361{
1362 struct percpu_ref *ref = data;
1363 struct hmm_devmem *devmem;
1364
1365 devmem = container_of(ref, struct hmm_devmem, ref);
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001366 wait_for_completion(&devmem->completion);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001367 percpu_ref_exit(ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001368}
1369
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001370static void hmm_devmem_ref_kill(struct percpu_ref *ref)
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001371{
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001372 percpu_ref_kill(ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001373}
1374
Souptick Joarderb57e622e62019-03-11 23:28:10 -07001375static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001376 unsigned long addr,
1377 const struct page *page,
1378 unsigned int flags,
1379 pmd_t *pmdp)
1380{
1381 struct hmm_devmem *devmem = page->pgmap->data;
1382
1383 return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
1384}
1385
1386static void hmm_devmem_free(struct page *page, void *data)
1387{
1388 struct hmm_devmem *devmem = data;
1389
Dan Williams2fa147b2018-07-13 21:50:01 -07001390 page->mapping = NULL;
1391
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001392 devmem->ops->free(devmem, page);
1393}
1394
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001395/*
1396 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
1397 *
1398 * @ops: memory event device driver callback (see struct hmm_devmem_ops)
1399 * @device: device struct to bind the resource to
1400 * @size: size in bytes of the device memory to add
Ralph Campbell085ea252019-05-06 16:29:39 -07001401 * Return: pointer to new hmm_devmem struct on success, ERR_PTR otherwise
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001402 *
1403 * This function first finds an empty range of physical addresses big enough to
1404 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
1405 * in turn allocates struct pages. It does not do anything beyond that; all
1406 * events affecting the memory will go through the various callbacks provided
1407 * by the hmm_devmem_ops struct.
1408 *
1409 * The device driver should call this function during device initialization
1410 * and is then responsible for memory management. HMM only provides helpers.
1411 */
1412struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
1413 struct device *device,
1414 unsigned long size)
1415{
1416 struct hmm_devmem *devmem;
1417 resource_size_t addr;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001418 void *result;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001419 int ret;
1420
Dan Williamse76384882018-05-16 11:46:08 -07001421 dev_pagemap_get_ops();
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001422
Dan Williams58ef15b2018-12-28 00:35:07 -08001423 devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001424 if (!devmem)
1425 return ERR_PTR(-ENOMEM);
1426
1427 init_completion(&devmem->completion);
1428 devmem->pfn_first = -1UL;
1429 devmem->pfn_last = -1UL;
1430 devmem->resource = NULL;
1431 devmem->device = device;
1432 devmem->ops = ops;
1433
1434 ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1435 0, GFP_KERNEL);
1436 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001437 return ERR_PTR(ret);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001438
Dan Williams58ef15b2018-12-28 00:35:07 -08001439 ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001440 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001441 return ERR_PTR(ret);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001442
1443 size = ALIGN(size, PA_SECTION_SIZE);
1444 addr = min((unsigned long)iomem_resource.end,
1445 (1UL << MAX_PHYSMEM_BITS) - 1);
1446 addr = addr - size + 1UL;
1447
1448 /*
1449 * FIXME add a new helper to quickly walk resource tree and find free
1450 * range
1451 *
1452 * FIXME what about ioport_resource resource ?
1453 */
1454 for (; addr > size && addr >= iomem_resource.start; addr -= size) {
1455 ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
1456 if (ret != REGION_DISJOINT)
1457 continue;
1458
1459 devmem->resource = devm_request_mem_region(device, addr, size,
1460 dev_name(device));
Dan Williams58ef15b2018-12-28 00:35:07 -08001461 if (!devmem->resource)
1462 return ERR_PTR(-ENOMEM);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001463 break;
1464 }
Dan Williams58ef15b2018-12-28 00:35:07 -08001465 if (!devmem->resource)
1466 return ERR_PTR(-ERANGE);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001467
1468 devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
1469 devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1470 devmem->pfn_last = devmem->pfn_first +
1471 (resource_size(devmem->resource) >> PAGE_SHIFT);
Dan Williams063a7d12018-12-28 00:39:46 -08001472 devmem->page_fault = hmm_devmem_fault;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001473
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001474 devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
1475 devmem->pagemap.res = *devmem->resource;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001476 devmem->pagemap.page_free = hmm_devmem_free;
1477 devmem->pagemap.altmap_valid = false;
1478 devmem->pagemap.ref = &devmem->ref;
1479 devmem->pagemap.data = devmem;
1480 devmem->pagemap.kill = hmm_devmem_ref_kill;
Dan Williams58ef15b2018-12-28 00:35:07 -08001481
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001482 result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1483 if (IS_ERR(result))
1484 return result;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001485 return devmem;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001486}
Dan Williams02917e92018-12-28 00:35:15 -08001487EXPORT_SYMBOL_GPL(hmm_devmem_add);
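
/*
 * Illustrative registration sketch, not part of the original file: wiring a
 * minimal hmm_devmem_ops into hmm_devmem_add(). The example_* names are made
 * up; the callback signatures mirror the hmm_devmem_fault()/hmm_devmem_free()
 * wrappers above. A real driver would migrate data back to system memory in
 * its fault handler instead of failing the fault.
 */
static vm_fault_t example_devmem_fault(struct hmm_devmem *devmem,
				       struct vm_area_struct *vma,
				       unsigned long addr,
				       const struct page *page,
				       unsigned int flags,
				       pmd_t *pmdp)
{
	/* Migrate the device memory backing @page to system memory here. */
	return VM_FAULT_SIGBUS;
}

static void example_devmem_free(struct hmm_devmem *devmem, struct page *page)
{
	/* Hand the backing device memory back to the driver's allocator. */
}

static const struct hmm_devmem_ops example_devmem_ops = {
	.fault = example_devmem_fault,
	.free = example_devmem_free,
};

static struct hmm_devmem *example_devmem_register(struct device *dev,
						  unsigned long size)
{
	/* On success the returned devmem describes the hotplugged pfn range. */
	return hmm_devmem_add(&example_devmem_ops, dev, size);
}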
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001488
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001489struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
1490 struct device *device,
1491 struct resource *res)
1492{
1493 struct hmm_devmem *devmem;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001494 void *result;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001495 int ret;
1496
1497 if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
1498 return ERR_PTR(-EINVAL);
1499
Dan Williamse76384882018-05-16 11:46:08 -07001500 dev_pagemap_get_ops();
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001501
Dan Williams58ef15b2018-12-28 00:35:07 -08001502 devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001503 if (!devmem)
1504 return ERR_PTR(-ENOMEM);
1505
1506 init_completion(&devmem->completion);
1507 devmem->pfn_first = -1UL;
1508 devmem->pfn_last = -1UL;
1509 devmem->resource = res;
1510 devmem->device = device;
1511 devmem->ops = ops;
1512
1513 ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1514 0, GFP_KERNEL);
1515 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001516 return ERR_PTR(ret);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001517
Dan Williams58ef15b2018-12-28 00:35:07 -08001518 ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
1519 &devmem->ref);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001520 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001521 return ERR_PTR(ret);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001522
1523 devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1524 devmem->pfn_last = devmem->pfn_first +
1525 (resource_size(devmem->resource) >> PAGE_SHIFT);
Dan Williams063a7d12018-12-28 00:39:46 -08001526 devmem->page_fault = hmm_devmem_fault;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001527
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001528 devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
1529 devmem->pagemap.res = *devmem->resource;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001530 devmem->pagemap.page_free = hmm_devmem_free;
1531 devmem->pagemap.altmap_valid = false;
1532 devmem->pagemap.ref = &devmem->ref;
1533 devmem->pagemap.data = devmem;
1534 devmem->pagemap.kill = hmm_devmem_ref_kill;
Dan Williams58ef15b2018-12-28 00:35:07 -08001535
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001536 result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1537 if (IS_ERR(result))
1538 return result;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001539 return devmem;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001540}
Dan Williams02917e92018-12-28 00:35:15 -08001541EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
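
/*
 * Illustrative sketch, not part of the original file: the resource handed to
 * hmm_devmem_add_resource() must already be flagged as device public memory
 * or the call fails with -EINVAL. Reuses example_devmem_ops from the sketch
 * after hmm_devmem_add() above.
 */
static struct hmm_devmem *example_public_register(struct device *dev,
						  struct resource *res)
{
	if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
		return ERR_PTR(-EINVAL);

	return hmm_devmem_add_resource(&example_devmem_ops, dev, res);
}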
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001542
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001543/*
Jérôme Glisse858b54d2017-09-08 16:12:02 -07001544 * A device driver that wants to handle multiple devices' memory through a
1545 * single fake device can use hmm_device to do so. This is purely a helper
1546 * and is not required in order to use any HMM functionality.
1547 */
1548#define HMM_DEVICE_MAX 256
1549
1550static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
1551static DEFINE_SPINLOCK(hmm_device_lock);
1552static struct class *hmm_device_class;
1553static dev_t hmm_device_devt;
1554
1555static void hmm_device_release(struct device *device)
1556{
1557 struct hmm_device *hmm_device;
1558
1559 hmm_device = container_of(device, struct hmm_device, device);
1560 spin_lock(&hmm_device_lock);
1561 clear_bit(hmm_device->minor, hmm_device_mask);
1562 spin_unlock(&hmm_device_lock);
1563
1564 kfree(hmm_device);
1565}
1566
1567struct hmm_device *hmm_device_new(void *drvdata)
1568{
1569 struct hmm_device *hmm_device;
1570
1571 hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
1572 if (!hmm_device)
1573 return ERR_PTR(-ENOMEM);
1574
1575 spin_lock(&hmm_device_lock);
1576 hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
1577 if (hmm_device->minor >= HMM_DEVICE_MAX) {
1578 spin_unlock(&hmm_device_lock);
1579 kfree(hmm_device);
1580 return ERR_PTR(-EBUSY);
1581 }
1582 set_bit(hmm_device->minor, hmm_device_mask);
1583 spin_unlock(&hmm_device_lock);
1584
1585 dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
1586 hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
1587 hmm_device->minor);
1588 hmm_device->device.release = hmm_device_release;
1589 dev_set_drvdata(&hmm_device->device, drvdata);
1590 hmm_device->device.class = hmm_device_class;
1591 device_initialize(&hmm_device->device);
1592
1593 return hmm_device;
1594}
1595EXPORT_SYMBOL(hmm_device_new);
1596
1597void hmm_device_put(struct hmm_device *hmm_device)
1598{
1599 put_device(&hmm_device->device);
1600}
1601EXPORT_SYMBOL(hmm_device_put);
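
/*
 * Illustrative sketch, not part of the original file: a driver funneling
 * several devices' memory through one fake hmm_device. The drvdata pointer
 * and what gets bound to hdev->device are driver specific and only hinted at.
 */
static struct hmm_device *example_hmm_device_create(void *drvdata)
{
	struct hmm_device *hdev;

	hdev = hmm_device_new(drvdata);
	if (IS_ERR(hdev))
		return hdev;

	/* ... hotplug the devices' memory and tie it to hdev->device ... */
	return hdev;
}

static void example_hmm_device_destroy(struct hmm_device *hdev)
{
	/* Drops the reference taken by hmm_device_new(). */
	hmm_device_put(hdev);
}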
1602
1603static int __init hmm_init(void)
1604{
1605 int ret;
1606
1607 ret = alloc_chrdev_region(&hmm_device_devt, 0,
1608 HMM_DEVICE_MAX,
1609 "hmm_device");
1610 if (ret)
1611 return ret;
1612
1613 hmm_device_class = class_create(THIS_MODULE, "hmm_device");
1614 if (IS_ERR(hmm_device_class)) {
1615 unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
1616 return PTR_ERR(hmm_device_class);
1617 }
1618 return 0;
1619}
1620
1621device_initcall(hmm_init);
Jérôme Glissedf6ad692017-09-08 16:12:24 -07001622#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */