// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

#define PA_SECTION_SIZE		(1UL << PA_SECTION_SHIFT)

#if IS_ENABLED(CONFIG_HMM_MIRROR)
static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 *          (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then this function gets a reference on it and returns
 * it. Otherwise it allocates an HMM struct, initializes it, associates it
 * with the mm, and returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
        struct hmm *hmm;

        lockdep_assert_held_exclusive(&mm->mmap_sem);

        /* Abuse the page_table_lock to also protect mm->hmm. */
        spin_lock(&mm->page_table_lock);
        hmm = mm->hmm;
        if (mm->hmm && kref_get_unless_zero(&mm->hmm->kref))
                goto out_unlock;
        spin_unlock(&mm->page_table_lock);

        hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
        if (!hmm)
                return NULL;
        init_waitqueue_head(&hmm->wq);
        INIT_LIST_HEAD(&hmm->mirrors);
        init_rwsem(&hmm->mirrors_sem);
        hmm->mmu_notifier.ops = NULL;
        INIT_LIST_HEAD(&hmm->ranges);
        mutex_init(&hmm->lock);
        kref_init(&hmm->kref);
        hmm->notifiers = 0;
        hmm->mm = mm;

        hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
        if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
                kfree(hmm);
                return NULL;
        }

        mmgrab(hmm->mm);

        /*
         * We hold the exclusive mmap_sem here so we know that mm->hmm is
         * still NULL or has a zero kref, and is therefore safe to update.
         */
        spin_lock(&mm->page_table_lock);
        mm->hmm = hmm;

out_unlock:
        spin_unlock(&mm->page_table_lock);
        return hmm;
}

static void hmm_free_rcu(struct rcu_head *rcu)
{
        struct hmm *hmm = container_of(rcu, struct hmm, rcu);

        mmdrop(hmm->mm);
        kfree(hmm);
}

static void hmm_free(struct kref *kref)
{
        struct hmm *hmm = container_of(kref, struct hmm, kref);

        spin_lock(&hmm->mm->page_table_lock);
        if (hmm->mm->hmm == hmm)
                hmm->mm->hmm = NULL;
        spin_unlock(&hmm->mm->page_table_lock);

        mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
        mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
}

static inline void hmm_put(struct hmm *hmm)
{
        kref_put(&hmm->kref, hmm_free);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
        struct hmm_mirror *mirror;

        /* Bail out if hmm is in the process of being freed */
        if (!kref_get_unless_zero(&hmm->kref))
                return;

        /*
         * Since hmm_range_register() holds the mmget() lock, hmm_release()
         * is prevented as long as a range exists.
         */
        WARN_ON(!list_empty_careful(&hmm->ranges));

        down_write(&hmm->mirrors_sem);
        mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
                                          list);
        while (mirror) {
                list_del_init(&mirror->list);
                if (mirror->ops->release) {
                        /*
                         * Drop mirrors_sem so the release callback can wait
                         * on any pending work that might itself trigger a
                         * mmu_notifier callback and thus would deadlock with
                         * us.
                         */
                        up_write(&hmm->mirrors_sem);
                        mirror->ops->release(mirror);
                        down_write(&hmm->mirrors_sem);
                }
                mirror = list_first_entry_or_null(&hmm->mirrors,
                                                  struct hmm_mirror, list);
        }
        up_write(&hmm->mirrors_sem);

        hmm_put(hmm);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
                        const struct mmu_notifier_range *nrange)
{
        struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
        struct hmm_mirror *mirror;
        struct hmm_update update;
        struct hmm_range *range;
        int ret = 0;

        if (!kref_get_unless_zero(&hmm->kref))
                return 0;

        update.start = nrange->start;
        update.end = nrange->end;
        update.event = HMM_UPDATE_INVALIDATE;
        update.blockable = mmu_notifier_range_blockable(nrange);

        if (mmu_notifier_range_blockable(nrange))
                mutex_lock(&hmm->lock);
        else if (!mutex_trylock(&hmm->lock)) {
                ret = -EAGAIN;
                goto out;
        }
        hmm->notifiers++;
        list_for_each_entry(range, &hmm->ranges, list) {
                if (update.end < range->start || update.start >= range->end)
                        continue;

                range->valid = false;
        }
        mutex_unlock(&hmm->lock);

        if (mmu_notifier_range_blockable(nrange))
                down_read(&hmm->mirrors_sem);
        else if (!down_read_trylock(&hmm->mirrors_sem)) {
                ret = -EAGAIN;
                goto out;
        }
        list_for_each_entry(mirror, &hmm->mirrors, list) {
                int ret;

                ret = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
                if (!update.blockable && ret == -EAGAIN)
                        break;
        }
        up_read(&hmm->mirrors_sem);

out:
        hmm_put(hmm);
        return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
                        const struct mmu_notifier_range *nrange)
{
        struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

        if (!kref_get_unless_zero(&hmm->kref))
                return;

        mutex_lock(&hmm->lock);
        hmm->notifiers--;
        if (!hmm->notifiers) {
                struct hmm_range *range;

                list_for_each_entry(range, &hmm->ranges, list) {
                        if (range->valid)
                                continue;
                        range->valid = true;
                }
                wake_up_all(&hmm->wq);
        }
        mutex_unlock(&hmm->lock);

        hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
        .release                = hmm_release,
        .invalidate_range_start = hmm_invalidate_range_start,
        .invalidate_range_end   = hmm_invalidate_range_end,
};
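
/*
 * Example (illustrative sketch, not from the original file): a driver's
 * sync_cpu_device_pagetables() callback, which the invalidation loop above
 * calls for every registered mirror, would typically tear down the device
 * page table over the update range. The my_mirror type and the
 * my_device_invalidate() helper are hypothetical:
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *					const struct hmm_update *update)
 *	{
 *		struct my_mirror *m;
 *
 *		m = container_of(mirror, struct my_mirror, mirror);
 *		if (!update->blockable)
 *			return -EAGAIN;
 *		my_device_invalidate(m->dev, update->start, update->end);
 *		return 0;
 *	}
 */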

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
        lockdep_assert_held_exclusive(&mm->mmap_sem);

        /* Sanity check */
        if (!mm || !mirror || !mirror->ops)
                return -EINVAL;

        mirror->hmm = hmm_get_or_create(mm);
        if (!mirror->hmm)
                return -ENOMEM;

        down_write(&mirror->hmm->mirrors_sem);
        list_add(&mirror->list, &mirror->hmm->mirrors);
        up_write(&mirror->hmm->mirrors_sem);

        return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
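
/*
 * Example (illustrative sketch): a driver would embed a struct hmm_mirror in
 * its per-process state and register it with mmap_sem held for writing. The
 * my_process struct and my_mirror_ops are hypothetical:
 *
 *	struct my_process {
 *		struct hmm_mirror mirror;
 *	};
 *
 *	static int my_process_bind(struct my_process *p)
 *	{
 *		int ret;
 *
 *		p->mirror.ops = &my_mirror_ops;
 *		down_write(&current->mm->mmap_sem);
 *		ret = hmm_mirror_register(&p->mirror, current->mm);
 *		up_write(&current->mm->mmap_sem);
 *		return ret;
 *	}
 *
 * Teardown is the reverse: hmm_mirror_unregister(&p->mirror).
 */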

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
        struct hmm *hmm = READ_ONCE(mirror->hmm);

        if (hmm == NULL)
                return;

        down_write(&hmm->mirrors_sem);
        list_del_init(&mirror->list);
        /* To protect us against double unregister ... */
        mirror->hmm = NULL;
        up_write(&hmm->mirrors_sem);

        hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
        struct hmm_range        *range;
        struct dev_pagemap      *pgmap;
        unsigned long           last;
        bool                    fault;
        bool                    block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
                            bool write_fault, uint64_t *pfn)
{
        unsigned int flags = FAULT_FLAG_REMOTE;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        vm_fault_t ret;

        flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
        flags |= write_fault ? FAULT_FLAG_WRITE : 0;
        ret = handle_mm_fault(vma, addr, flags);
        if (ret & VM_FAULT_RETRY)
                return -EAGAIN;
        if (ret & VM_FAULT_ERROR) {
                *pfn = range->values[HMM_PFN_ERROR];
                return -EFAULT;
        }

        return -EBUSY;
}

static int hmm_pfns_bad(unsigned long addr,
                        unsigned long end,
                        struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long i;

        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, i++)
                pfns[i] = range->values[HMM_PFN_ERROR];

        return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
                              bool fault, bool write_fault,
                              struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        uint64_t *pfns = range->pfns;
        unsigned long i, page_size;

        hmm_vma_walk->last = addr;
        page_size = hmm_range_page_size(range);
        i = (addr - range->start) >> range->page_shift;

        for (; addr < end; addr += page_size, i++) {
                pfns[i] = range->values[HMM_PFN_NONE];
                if (fault || write_fault) {
                        int ret;

                        ret = hmm_vma_do_fault(walk, addr, write_fault,
                                               &pfns[i]);
                        if (ret != -EBUSY)
                                return ret;
                }
        }

        return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                      uint64_t pfns, uint64_t cpu_flags,
                                      bool *fault, bool *write_fault)
{
        struct hmm_range *range = hmm_vma_walk->range;

        if (!hmm_vma_walk->fault)
                return;

        /*
         * We consider not only the individual per-page request but also the
         * default flags requested for the range. The API can be used in two
         * fashions: the first one where the HMM user coalesces multiple page
         * faults into one request and sets flags per pfn for those faults;
         * the second one where the HMM user wants to pre-fault a range with
         * specific flags. For the latter it would be a waste to have the
         * user pre-fill the pfn array with a default flags value.
         */
        pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

        /* We aren't asked to do anything ... */
        if (!(pfns & range->flags[HMM_PFN_VALID]))
                return;
        /* If this is device memory then only fault if explicitly requested */
        if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
                /* Do we fault on device memory ? */
                if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
                        *write_fault = pfns & range->flags[HMM_PFN_WRITE];
                        *fault = true;
                }
                return;
        }

        /* If CPU page table is not valid then we need to fault */
        *fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
        /* Need to write fault ? */
        if ((pfns & range->flags[HMM_PFN_WRITE]) &&
            !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
                *write_fault = true;
                *fault = true;
        }
}
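
/*
 * Example (illustrative sketch): the second fashion described above, where a
 * caller pre-faults a whole range for write without pre-filling the pfn
 * array, could be set up roughly as follows (flag indices as defined by the
 * driver's range->flags table):
 *
 *	range.default_flags = range.flags[HMM_PFN_VALID] |
 *			      range.flags[HMM_PFN_WRITE];
 *	range.pfn_flags_mask = 0;
 *	memset(range.pfns, 0, npages * sizeof(*range.pfns));
 *
 * With pfn_flags_mask equal to 0, the masking above reduces every entry to
 * default_flags, so each page in the range is faulted with write permission.
 */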

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
                                 const uint64_t *pfns, unsigned long npages,
                                 uint64_t cpu_flags, bool *fault,
                                 bool *write_fault)
{
        unsigned long i;

        if (!hmm_vma_walk->fault) {
                *fault = *write_fault = false;
                return;
        }

        *fault = *write_fault = false;
        for (i = 0; i < npages; ++i) {
                hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
                                   fault, write_fault);
                if ((*write_fault))
                        return;
        }
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
                             struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        bool fault, write_fault;
        unsigned long i, npages;
        uint64_t *pfns;

        i = (addr - range->start) >> PAGE_SHIFT;
        npages = (end - addr) >> PAGE_SHIFT;
        pfns = &range->pfns[i];
        hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                             0, &fault, &write_fault);
        return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
        if (pmd_protnone(pmd))
                return 0;
        return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}

static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
        if (!pud_present(pud))
                return 0;
        return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pmd(struct mm_walk *walk,
                              unsigned long addr,
                              unsigned long end,
                              uint64_t *pfns,
                              pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long pfn, npages, i;
        bool fault, write_fault;
        uint64_t cpu_flags;

        npages = (end - addr) >> PAGE_SHIFT;
        cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
        hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
                             &fault, &write_fault);

        if (pmd_protnone(pmd) || fault || write_fault)
                return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

        pfn = pmd_pfn(pmd) + pte_index(addr);
        for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
                if (pmd_devmap(pmd)) {
                        hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
                                              hmm_vma_walk->pgmap);
                        if (unlikely(!hmm_vma_walk->pgmap))
                                return -EBUSY;
                }
                pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
        }
        if (hmm_vma_walk->pgmap) {
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;
        }
        hmm_vma_walk->last = end;
        return 0;
#else
        /* If THP is not enabled then we should never reach this code! */
        return -EINVAL;
#endif
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
        if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
                return 0;
        return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_WRITE] :
                                range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
                              unsigned long end, pmd_t *pmdp, pte_t *ptep,
                              uint64_t *pfn)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        bool fault, write_fault;
        uint64_t cpu_flags;
        pte_t pte = *ptep;
        uint64_t orig_pfn = *pfn;

        *pfn = range->values[HMM_PFN_NONE];
        fault = write_fault = false;

        if (pte_none(pte)) {
                hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
                                   &fault, &write_fault);
                if (fault || write_fault)
                        goto fault;
                return 0;
        }

        if (!pte_present(pte)) {
                swp_entry_t entry = pte_to_swp_entry(pte);

                if (!non_swap_entry(entry)) {
                        if (fault || write_fault)
                                goto fault;
                        return 0;
                }

                /*
                 * This is a special swap entry: ignore migration entries,
                 * use device-private entries, and report anything else as
                 * an error.
                 */
                if (is_device_private_entry(entry)) {
                        cpu_flags = range->flags[HMM_PFN_VALID] |
                                range->flags[HMM_PFN_DEVICE_PRIVATE];
                        cpu_flags |= is_write_device_private_entry(entry) ?
                                range->flags[HMM_PFN_WRITE] : 0;
                        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                                           &fault, &write_fault);
                        if (fault || write_fault)
                                goto fault;
                        *pfn = hmm_device_entry_from_pfn(range,
                                            swp_offset(entry));
                        *pfn |= cpu_flags;
                        return 0;
                }

                if (is_migration_entry(entry)) {
                        if (fault || write_fault) {
                                pte_unmap(ptep);
                                hmm_vma_walk->last = addr;
                                migration_entry_wait(vma->vm_mm,
                                                     pmdp, addr);
                                return -EBUSY;
                        }
                        return 0;
                }

                /* Report error for everything else */
                *pfn = range->values[HMM_PFN_ERROR];
                return -EFAULT;
        } else {
                cpu_flags = pte_to_hmm_pfn_flags(range, pte);
                hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                                   &fault, &write_fault);
        }

        if (fault || write_fault)
                goto fault;

        if (pte_devmap(pte)) {
                hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
                                              hmm_vma_walk->pgmap);
                if (unlikely(!hmm_vma_walk->pgmap))
                        return -EBUSY;
        } else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
                *pfn = range->values[HMM_PFN_SPECIAL];
                return -EFAULT;
        }

        *pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
        return 0;

fault:
        if (hmm_vma_walk->pgmap) {
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;
        }
        pte_unmap(ptep);
        /* Fault any virtual address we were asked to fault */
        return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        uint64_t *pfns = range->pfns;
        unsigned long addr = start, i;
        pte_t *ptep;
        pmd_t pmd;

again:
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return hmm_vma_walk_hole(start, end, walk);

        if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
                return hmm_pfns_bad(start, end, walk);

        if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
                bool fault, write_fault;
                unsigned long npages;
                uint64_t *pfns;

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                pfns = &range->pfns[i];

                hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                                     0, &fault, &write_fault);
                if (fault || write_fault) {
                        hmm_vma_walk->last = addr;
                        pmd_migration_entry_wait(vma->vm_mm, pmdp);
                        return -EBUSY;
                }
                return 0;
        } else if (!pmd_present(pmd))
                return hmm_pfns_bad(start, end, walk);

        if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
                /*
                 * No need to take the pmd lock here: even if some other
                 * thread is splitting the huge pmd we will get that event
                 * through the mmu_notifier callback.
                 *
                 * So just read the pmd value and check again that it is a
                 * transparent huge or device mapping one, and compute the
                 * corresponding pfn values.
                 */
                pmd = pmd_read_atomic(pmdp);
                barrier();
                if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
                        goto again;

                i = (addr - range->start) >> PAGE_SHIFT;
                return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
        }

        /*
         * We have handled all the valid cases above, i.e. either none,
         * migration, huge or transparent huge. At this point either it is a
         * valid pmd entry pointing to a pte directory or it is a bad pmd
         * that will not recover.
         */
        if (pmd_bad(pmd))
                return hmm_pfns_bad(start, end, walk);

        ptep = pte_offset_map(pmdp, addr);
        i = (addr - range->start) >> PAGE_SHIFT;
        for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
                int r;

                r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
                if (r) {
                        /* hmm_vma_handle_pte() did unmap pte directory */
                        hmm_vma_walk->last = addr;
                        return r;
                }
        }
        if (hmm_vma_walk->pgmap) {
                /*
                 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
                 * so that we can leverage the get_dev_pagemap() optimization
                 * which will not re-take a reference on a pgmap if we already
                 * have one.
                 */
                put_dev_pagemap(hmm_vma_walk->pgmap);
                hmm_vma_walk->pgmap = NULL;
        }
        pte_unmap(ptep - 1);

        hmm_vma_walk->last = addr;
        return 0;
}

static int hmm_vma_walk_pud(pud_t *pudp,
                            unsigned long start,
                            unsigned long end,
                            struct mm_walk *walk)
{
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        unsigned long addr = start, next;
        pmd_t *pmdp;
        pud_t pud;
        int ret;

again:
        pud = READ_ONCE(*pudp);
        if (pud_none(pud))
                return hmm_vma_walk_hole(start, end, walk);

        if (pud_huge(pud) && pud_devmap(pud)) {
                unsigned long i, npages, pfn;
                uint64_t *pfns, cpu_flags;
                bool fault, write_fault;

                if (!pud_present(pud))
                        return hmm_vma_walk_hole(start, end, walk);

                i = (addr - range->start) >> PAGE_SHIFT;
                npages = (end - addr) >> PAGE_SHIFT;
                pfns = &range->pfns[i];

                cpu_flags = pud_to_hmm_pfn_flags(range, pud);
                hmm_range_need_fault(hmm_vma_walk, pfns, npages,
                                     cpu_flags, &fault, &write_fault);
                if (fault || write_fault)
                        return hmm_vma_walk_hole_(addr, end, fault,
                                                  write_fault, walk);

                pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
                for (i = 0; i < npages; ++i, ++pfn) {
                        hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
                                              hmm_vma_walk->pgmap);
                        if (unlikely(!hmm_vma_walk->pgmap))
                                return -EBUSY;
                        pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
                                  cpu_flags;
                }
                if (hmm_vma_walk->pgmap) {
                        put_dev_pagemap(hmm_vma_walk->pgmap);
                        hmm_vma_walk->pgmap = NULL;
                }
                hmm_vma_walk->last = end;
                return 0;
        }

        split_huge_pud(walk->vma, pudp, addr);
        if (pud_none(*pudp))
                goto again;

        pmdp = pmd_offset(pudp, addr);
        do {
                next = pmd_addr_end(addr, end);
                ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
                if (ret)
                        return ret;
        } while (pmdp++, addr = next, addr != end);

        return 0;
}

static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
                                      unsigned long start, unsigned long end,
                                      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
        unsigned long addr = start, i, pfn, mask, size, pfn_inc;
        struct hmm_vma_walk *hmm_vma_walk = walk->private;
        struct hmm_range *range = hmm_vma_walk->range;
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        uint64_t orig_pfn, cpu_flags;
        bool fault, write_fault;
        spinlock_t *ptl;
        pte_t entry;
        int ret = 0;

        size = 1UL << huge_page_shift(h);
        mask = size - 1;
        if (range->page_shift != PAGE_SHIFT) {
                /* Make sure we are looking at a full page. */
                if (start & mask)
                        return -EINVAL;
                if (end < (start + size))
                        return -EINVAL;
                pfn_inc = size >> PAGE_SHIFT;
        } else {
                pfn_inc = 1;
                size = PAGE_SIZE;
        }

        ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
        entry = huge_ptep_get(pte);

        i = (start - range->start) >> range->page_shift;
        orig_pfn = range->pfns[i];
        range->pfns[i] = range->values[HMM_PFN_NONE];
        cpu_flags = pte_to_hmm_pfn_flags(range, entry);
        fault = write_fault = false;
        hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
                           &fault, &write_fault);
        if (fault || write_fault) {
                ret = -ENOENT;
                goto unlock;
        }

        pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
        for (; addr < end; addr += size, i++, pfn += pfn_inc)
                range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
                                 cpu_flags;
        hmm_vma_walk->last = end;

unlock:
        spin_unlock(ptl);

        if (ret == -ENOENT)
                return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

        return ret;
#else /* CONFIG_HUGETLB_PAGE */
        return -EINVAL;
#endif
}

static void hmm_pfns_clear(struct hmm_range *range,
                           uint64_t *pfns,
                           unsigned long addr,
                           unsigned long end)
{
        for (; addr < end; addr += PAGE_SIZE, pfns++)
                *pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mirror: the mirror struct of the process address space to track
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
                       struct hmm_mirror *mirror,
                       unsigned long start,
                       unsigned long end,
                       unsigned page_shift)
{
        unsigned long mask = ((1UL << page_shift) - 1UL);
        struct hmm *hmm = mirror->hmm;

        range->valid = false;
        range->hmm = NULL;

        if ((start & mask) || (end & mask))
                return -EINVAL;
        if (start >= end)
                return -EINVAL;

        range->page_shift = page_shift;
        range->start = start;
        range->end = end;

        /* Prevent hmm_release() from running while the range is valid */
        if (!mmget_not_zero(hmm->mm))
                return -EFAULT;

        /* Initialize range to track CPU page table updates. */
        mutex_lock(&hmm->lock);

        range->hmm = hmm;
        kref_get(&hmm->kref);
        list_add(&range->list, &hmm->ranges);

        /*
         * If there are any concurrent notifiers we have to wait for them for
         * the range to be valid (see hmm_range_wait_until_valid()).
         */
        if (!hmm->notifiers)
                range->valid = true;
        mutex_unlock(&hmm->lock);

        return 0;
}
EXPORT_SYMBOL(hmm_range_register);

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
        struct hmm *hmm = range->hmm;

        /* Sanity check: this really should not happen. */
        if (hmm == NULL || range->end <= range->start)
                return;

        mutex_lock(&hmm->lock);
        list_del_init(&range->list);
        mutex_unlock(&hmm->lock);

        /* Drop reference taken by hmm_range_register() */
        range->valid = false;
        mmput(hmm->mm);
        hmm_put(hmm);
        range->hmm = NULL;
}
EXPORT_SYMBOL(hmm_range_unregister);
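
/*
 * Example (illustrative sketch): the overall range lifecycle, condensed from
 * the usage pattern documented in include/linux/hmm.h. Error handling is
 * elided and TIMEOUT_IN_MSEC is a driver-chosen value:
 *
 *	hmm_range_register(&range, &p->mirror, start, end, PAGE_SHIFT);
 *	hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);	(or hmm_range_fault())
 *	up_read(&mm->mmap_sem);
 *
 *	...consume range.pfns[] while hmm_range_valid(&range)...
 *	hmm_range_unregister(&range);
 */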

/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range); otherwise the number
 *          of valid pages in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses. Snapshot
 * validity is tracked by the range struct. See include/linux/hmm.h for an
 * example of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
        const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
        unsigned long start = range->start, end;
        struct hmm_vma_walk hmm_vma_walk;
        struct hmm *hmm = range->hmm;
        struct vm_area_struct *vma;
        struct mm_walk mm_walk;

        lockdep_assert_held(&hmm->mm->mmap_sem);
        do {
                /* If range is no longer valid force retry. */
                if (!range->valid)
                        return -EAGAIN;

                vma = find_vma(hmm->mm, start);
                if (vma == NULL || (vma->vm_flags & device_vma))
                        return -EFAULT;

                if (is_vm_hugetlb_page(vma)) {
                        if (huge_page_shift(hstate_vma(vma)) !=
                                    range->page_shift &&
                            range->page_shift != PAGE_SHIFT)
                                return -EINVAL;
                } else {
                        if (range->page_shift != PAGE_SHIFT)
                                return -EINVAL;
                }

                if (!(vma->vm_flags & VM_READ)) {
                        /*
                         * If the vma does not allow read access, then assume
                         * that it does not allow write access either. HMM
                         * does not support architectures that allow write
                         * without read.
                         */
                        hmm_pfns_clear(range, range->pfns,
                                       range->start, range->end);
                        return -EPERM;
                }

                range->vma = vma;
                hmm_vma_walk.pgmap = NULL;
                hmm_vma_walk.last = start;
                hmm_vma_walk.fault = false;
                hmm_vma_walk.range = range;
                mm_walk.private = &hmm_vma_walk;
                end = min(range->end, vma->vm_end);

                mm_walk.vma = vma;
                mm_walk.mm = vma->vm_mm;
                mm_walk.pte_entry = NULL;
                mm_walk.test_walk = NULL;
                mm_walk.hugetlb_entry = NULL;
                mm_walk.pud_entry = hmm_vma_walk_pud;
                mm_walk.pmd_entry = hmm_vma_walk_pmd;
                mm_walk.pte_hole = hmm_vma_walk_hole;
                mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

                walk_page_range(start, end, &mm_walk);
                start = end;
        } while (start < range->end);

        return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
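
/*
 * Example (illustrative sketch): retrying a snapshot that races with a
 * concurrent invalidation, following the pattern documented in
 * include/linux/hmm.h (TIMEOUT_IN_MSEC is a driver-chosen value):
 *
 *again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	if (ret == -EAGAIN) {
 *		up_read(&mm->mmap_sem);
 *		hmm_range_wait_until_valid(&range, TIMEOUT_IN_MSEC);
 *		goto again;
 *	}
 */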

/*
 * hmm_range_fault() - try to fault some addresses in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *          mmap_sem)
 * Return: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL: invalid arguments, or the mm or virtual address is in
 *                    an invalid vma (for instance a device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (i.e. either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
        const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
        unsigned long start = range->start, end;
        struct hmm_vma_walk hmm_vma_walk;
        struct hmm *hmm = range->hmm;
        struct vm_area_struct *vma;
        struct mm_walk mm_walk;
        int ret;

        lockdep_assert_held(&hmm->mm->mmap_sem);

        do {
                /* If range is no longer valid force retry. */
                if (!range->valid) {
                        up_read(&hmm->mm->mmap_sem);
                        return -EAGAIN;
                }

                vma = find_vma(hmm->mm, start);
                if (vma == NULL || (vma->vm_flags & device_vma))
                        return -EFAULT;

                if (is_vm_hugetlb_page(vma)) {
                        if (huge_page_shift(hstate_vma(vma)) !=
                            range->page_shift &&
                            range->page_shift != PAGE_SHIFT)
                                return -EINVAL;
                } else {
                        if (range->page_shift != PAGE_SHIFT)
                                return -EINVAL;
                }

                if (!(vma->vm_flags & VM_READ)) {
                        /*
                         * If the vma does not allow read access, then assume
                         * that it does not allow write access either. HMM
                         * does not support architectures that allow write
                         * without read.
                         */
                        hmm_pfns_clear(range, range->pfns,
                                       range->start, range->end);
                        return -EPERM;
                }

                range->vma = vma;
                hmm_vma_walk.pgmap = NULL;
                hmm_vma_walk.last = start;
                hmm_vma_walk.fault = true;
                hmm_vma_walk.block = block;
                hmm_vma_walk.range = range;
                mm_walk.private = &hmm_vma_walk;
                end = min(range->end, vma->vm_end);

                mm_walk.vma = vma;
                mm_walk.mm = vma->vm_mm;
                mm_walk.pte_entry = NULL;
                mm_walk.test_walk = NULL;
                mm_walk.hugetlb_entry = NULL;
                mm_walk.pud_entry = hmm_vma_walk_pud;
                mm_walk.pmd_entry = hmm_vma_walk_pmd;
                mm_walk.pte_hole = hmm_vma_walk_hole;
                mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

                do {
                        ret = walk_page_range(start, end, &mm_walk);
                        start = hmm_vma_walk.last;

                        /* Keep trying while the range is valid. */
                } while (ret == -EBUSY && range->valid);

                if (ret) {
                        unsigned long i;

                        i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
                        hmm_pfns_clear(range, &range->pfns[i],
                                       hmm_vma_walk.last, range->end);
                        return ret;
                }
                start = end;

        } while (start < range->end);

        return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
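
/*
 * Example (illustrative sketch): faulting a range and then consuming the
 * result under a driver lock that the driver's sync_cpu_device_pagetables()
 * callback also takes, so that the snapshot cannot be invalidated while the
 * device page table is updated. The driver->update_lock and
 * my_device_map_range() names are hypothetical:
 *
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, true);
 *	if (ret > 0) {
 *		mutex_lock(&driver->update_lock);
 *		if (hmm_range_valid(&range))
 *			my_device_map_range(driver, &range);
 *		mutex_unlock(&driver->update_lock);
 *	}
 *	up_read(&mm->mmap_sem);
 */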

/**
 * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device against which to dma map pages
 * @daddrs: dma addresses of mapped pages
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *          mmap_sem)
 * Return: number of pages mapped on success; -EAGAIN if mmap_sem has been
 *          dropped and you need to try again; some other error value
 *          otherwise
 *
 * Note: same usage pattern as hmm_range_fault().
 */
long hmm_range_dma_map(struct hmm_range *range,
                       struct device *device,
                       dma_addr_t *daddrs,
                       bool block)
{
        unsigned long i, npages, mapped;
        long ret;

        ret = hmm_range_fault(range, block);
        if (ret <= 0)
                return ret ? ret : -EBUSY;

        npages = (range->end - range->start) >> PAGE_SHIFT;
        for (i = 0, mapped = 0; i < npages; ++i) {
                enum dma_data_direction dir = DMA_TO_DEVICE;
                struct page *page;

                /*
                 * FIXME: we need to update the DMA API to provide an invalid
                 * DMA address value instead of a function to test the dma
                 * address value. This would remove a lot of dumb code
                 * duplicated across many architectures.
                 *
                 * For now setting it to 0 here is good enough as the pfns[]
                 * value is what is used to check what is valid and what isn't.
                 */
                daddrs[i] = 0;

                page = hmm_device_entry_to_page(range, range->pfns[i]);
                if (page == NULL)
                        continue;

                /* Check if range is being invalidated */
                if (!range->valid) {
                        ret = -EBUSY;
                        goto unmap;
                }

                /* If it is read and write then map bi-directional. */
                if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
                        dir = DMA_BIDIRECTIONAL;

                daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
                if (dma_mapping_error(device, daddrs[i])) {
                        ret = -EFAULT;
                        goto unmap;
                }

                mapped++;
        }

        return mapped;

unmap:
        for (npages = i, i = 0; (i < npages) && mapped; ++i) {
                enum dma_data_direction dir = DMA_TO_DEVICE;
                struct page *page;

                page = hmm_device_entry_to_page(range, range->pfns[i]);
                if (page == NULL)
                        continue;

                if (dma_mapping_error(device, daddrs[i]))
                        continue;

                /* If it is read and write then it was mapped bi-directional. */
                if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
                        dir = DMA_BIDIRECTIONAL;

                dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
                mapped--;
        }

        return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
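
/*
 * Example (illustrative sketch): mapping a faulted range for a device and
 * unmapping it again. The daddrs array must hold one dma_addr_t per page in
 * the range; error handling is elided:
 *
 *	npages = (range.end - range.start) >> PAGE_SHIFT;
 *	daddrs = kcalloc(npages, sizeof(*daddrs), GFP_KERNEL);
 *
 *	down_read(&mm->mmap_sem);
 *	mapped = hmm_range_dma_map(&range, dev, daddrs, true);
 *	up_read(&mm->mmap_sem);
 *
 *	...program the device using daddrs[]...
 *
 *	hmm_range_dma_unmap(&range, NULL, dev, daddrs, true);
 *	kfree(daddrs);
 */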
1229
1230/**
1231 * hmm_range_dma_unmap() - unmap range of that was map with hmm_range_dma_map()
1232 * @range: range being unmapped
1233 * @vma: the vma against which the range (optional)
1234 * @device: device against which dma map was done
1235 * @daddrs: dma address of mapped pages
1236 * @dirty: dirty page if it had the write flag set
Ralph Campbell085ea252019-05-06 16:29:39 -07001237 * Return: number of page unmapped on success, -EINVAL otherwise
Jérôme Glisse55c0ece2019-05-13 17:20:28 -07001238 *
1239 * Note that caller MUST abide by mmu notifier or use HMM mirror and abide
1240 * to the sync_cpu_device_pagetables() callback so that it is safe here to
1241 * call set_page_dirty(). Caller must also take appropriate locks to avoid
1242 * concurrent mmu notifier or sync_cpu_device_pagetables() to make progress.
1243 */
1244long hmm_range_dma_unmap(struct hmm_range *range,
1245 struct vm_area_struct *vma,
1246 struct device *device,
1247 dma_addr_t *daddrs,
1248 bool dirty)
1249{
1250 unsigned long i, npages;
1251 long cpages = 0;
1252
1253 /* Sanity check. */
1254 if (range->end <= range->start)
1255 return -EINVAL;
1256 if (!daddrs)
1257 return -EINVAL;
1258 if (!range->pfns)
1259 return -EINVAL;
1260
1261 npages = (range->end - range->start) >> PAGE_SHIFT;
1262 for (i = 0; i < npages; ++i) {
1263 enum dma_data_direction dir = DMA_TO_DEVICE;
1264 struct page *page;
1265
Jérôme Glisse391aab12019-05-13 17:20:31 -07001266 page = hmm_device_entry_to_page(range, range->pfns[i]);
Jérôme Glisse55c0ece2019-05-13 17:20:28 -07001267 if (page == NULL)
1268 continue;
1269
1270 /* If it is read and write than map bi-directional. */
1271 if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
1272 dir = DMA_BIDIRECTIONAL;
1273
1274 /*
1275 * See the comments in the function description for why
1276 * it is safe to call set_page_dirty() here.
1277 */
1278 if (dirty)
1279 set_page_dirty(page);
1280 }
1281
1282 /* Unmap and clear pfns/dma address */
1283 dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
1284 range->pfns[i] = range->values[HMM_PFN_NONE];
1285 /* FIXME see comments in hmm_range_dma_map() */
1286 daddrs[i] = 0;
1287 cpages++;
1288 }
1289
1290 return cpages;
1291}
1292EXPORT_SYMBOL(hmm_range_dma_unmap);
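/*
 * Example usage (illustrative sketch, hypothetical driver code): tearing
 * down the mapping made by hmm_range_dma_map(), with dirty set so that
 * pages that were mapped with the write flag get redirtied. Per the note
 * above, the caller must already hold whatever locks keep its
 * sync_cpu_device_pagetables() callback from running concurrently. mdev
 * is the same hypothetical driver state; the vma argument is unused here
 * so NULL is fine.
 *
 *	long unmapped;
 *
 *	unmapped = hmm_range_dma_unmap(range, NULL, mdev->dev,
 *				       mdev->daddrs, true);
 *	if (unmapped < 0)
 *		return unmapped;
 */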
Jérôme Glissec0b12402017-09-08 16:11:27 -07001293#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001294
1295
Jérôme Glissedf6ad692017-09-08 16:12:24 -07001296#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001297struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
1298 unsigned long addr)
1299{
1300 struct page *page;
1301
1302 page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
1303 if (!page)
1304 return NULL;
1305 lock_page(page);
1306 return page;
1307}
1308EXPORT_SYMBOL(hmm_vma_alloc_locked_page);
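/*
 * Example usage (illustrative sketch, hypothetical driver code): when
 * migrating device memory back to system memory (e.g. from a driver's
 * hmm_devmem_ops->fault callback), the destination page is typically
 * allocated locked, filled, then unlocked once migration completes.
 * my_copy_from_device() is a hypothetical driver helper.
 *
 *	struct page *dpage;
 *
 *	dpage = hmm_vma_alloc_locked_page(vma, addr);
 *	if (!dpage)
 *		return VM_FAULT_SIGBUS;
 *	my_copy_from_device(devmem, page, dpage);
 *	...hand dpage to the migration machinery, then unlock_page(dpage)...
 */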
1309
1310
1311static void hmm_devmem_ref_release(struct percpu_ref *ref)
1312{
1313 struct hmm_devmem *devmem;
1314
1315 devmem = container_of(ref, struct hmm_devmem, ref);
1316 complete(&devmem->completion);
1317}
1318
1319static void hmm_devmem_ref_exit(void *data)
1320{
1321 struct percpu_ref *ref = data;
1322 struct hmm_devmem *devmem;
1323
1324 devmem = container_of(ref, struct hmm_devmem, ref);
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001325 wait_for_completion(&devmem->completion);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001326 percpu_ref_exit(ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001327}
1328
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001329static void hmm_devmem_ref_kill(struct percpu_ref *ref)
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001330{
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001331 percpu_ref_kill(ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001332}
1333
Souptick Joarderb57e622e62019-03-11 23:28:10 -07001334static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001335 unsigned long addr,
1336 const struct page *page,
1337 unsigned int flags,
1338 pmd_t *pmdp)
1339{
1340 struct hmm_devmem *devmem = page->pgmap->data;
1341
1342 return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
1343}
1344
1345static void hmm_devmem_free(struct page *page, void *data)
1346{
1347 struct hmm_devmem *devmem = data;
1348
Dan Williams2fa147b2018-07-13 21:50:01 -07001349 page->mapping = NULL;
1350
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001351 devmem->ops->free(devmem, page);
1352}
1353
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001354/*
1355 * hmm_devmem_add() - hotplug ZONE_DEVICE memory for device memory
1356 *
1357 * @ops: memory event device driver callbacks (see struct hmm_devmem_ops)
1358 * @device: device struct to bind the resource to
1359 * @size: size in bytes of the device memory to add
Ralph Campbell085ea252019-05-06 16:29:39 -07001360 * Return: pointer to the new hmm_devmem struct, ERR_PTR otherwise
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001361 *
1362 * This function first finds an empty range of physical addresses big enough to
1363 * contain the new resource, and then hotplugs it as ZONE_DEVICE memory, which
1364 * in turn allocates struct pages. It does not do anything beyond that; all
1365 * events affecting the memory will go through the various callbacks provided
1366 * by hmm_devmem_ops struct.
1367 *
1368 * The device driver should call this function during device initialization
1369 * and is then responsible for memory management. HMM only provides helpers.
1370 */
1371struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
1372 struct device *device,
1373 unsigned long size)
1374{
1375 struct hmm_devmem *devmem;
1376 resource_size_t addr;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001377 void *result;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001378 int ret;
1379
Dan Williamse76384882018-05-16 11:46:08 -07001380 dev_pagemap_get_ops();
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001381
Dan Williams58ef15b2018-12-28 00:35:07 -08001382 devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001383 if (!devmem)
1384 return ERR_PTR(-ENOMEM);
1385
1386 init_completion(&devmem->completion);
1387 devmem->pfn_first = -1UL;
1388 devmem->pfn_last = -1UL;
1389 devmem->resource = NULL;
1390 devmem->device = device;
1391 devmem->ops = ops;
1392
1393 ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1394 0, GFP_KERNEL);
1395 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001396 return ERR_PTR(ret);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001397
Dan Williams58ef15b2018-12-28 00:35:07 -08001398 ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit, &devmem->ref);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001399 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001400 return ERR_PTR(ret);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001401
1402 size = ALIGN(size, PA_SECTION_SIZE);
1403 addr = min((unsigned long)iomem_resource.end,
1404 (1UL << MAX_PHYSMEM_BITS) - 1);
1405 addr = addr - size + 1UL;
1406
1407 /*
1408 * FIXME add a new helper to quickly walk the resource tree and find a
1409 * free range
1410 *
1411 * FIXME what about the ioport_resource?
1412 */
1413 for (; addr > size && addr >= iomem_resource.start; addr -= size) {
1414 ret = region_intersects(addr, size, 0, IORES_DESC_NONE);
1415 if (ret != REGION_DISJOINT)
1416 continue;
1417
1418 devmem->resource = devm_request_mem_region(device, addr, size,
1419 dev_name(device));
Dan Williams58ef15b2018-12-28 00:35:07 -08001420 if (!devmem->resource)
1421 return ERR_PTR(-ENOMEM);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001422 break;
1423 }
Dan Williams58ef15b2018-12-28 00:35:07 -08001424 if (!devmem->resource)
1425 return ERR_PTR(-ERANGE);
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001426
1427 devmem->resource->desc = IORES_DESC_DEVICE_PRIVATE_MEMORY;
1428 devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1429 devmem->pfn_last = devmem->pfn_first +
1430 (resource_size(devmem->resource) >> PAGE_SHIFT);
Dan Williams063a7d12018-12-28 00:39:46 -08001431 devmem->page_fault = hmm_devmem_fault;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001432
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001433 devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
1434 devmem->pagemap.res = *devmem->resource;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001435 devmem->pagemap.page_free = hmm_devmem_free;
1436 devmem->pagemap.altmap_valid = false;
1437 devmem->pagemap.ref = &devmem->ref;
1438 devmem->pagemap.data = devmem;
1439 devmem->pagemap.kill = hmm_devmem_ref_kill;
Dan Williams58ef15b2018-12-28 00:35:07 -08001440
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001441 result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1442 if (IS_ERR(result))
1443 return result;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001444 return devmem;
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001445}
Dan Williams02917e92018-12-28 00:35:15 -08001446EXPORT_SYMBOL_GPL(hmm_devmem_add);
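/*
 * Example usage (illustrative sketch, hypothetical driver code):
 * registering 1GB of device private memory at probe time.
 * my_devmem_fault() and my_devmem_free() are hypothetical driver
 * callbacks matching the struct hmm_devmem_ops signatures.
 *
 *	static const struct hmm_devmem_ops my_devmem_ops = {
 *		.free = my_devmem_free,
 *		.fault = my_devmem_fault,
 *	};
 *
 *	devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, SZ_1G);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	// device pages span devmem->pfn_first to devmem->pfn_last
 */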
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001447
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001448struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
1449 struct device *device,
1450 struct resource *res)
1451{
1452 struct hmm_devmem *devmem;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001453 void *result;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001454 int ret;
1455
1456 if (res->desc != IORES_DESC_DEVICE_PUBLIC_MEMORY)
1457 return ERR_PTR(-EINVAL);
1458
Dan Williamse76384882018-05-16 11:46:08 -07001459 dev_pagemap_get_ops();
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001460
Dan Williams58ef15b2018-12-28 00:35:07 -08001461 devmem = devm_kzalloc(device, sizeof(*devmem), GFP_KERNEL);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001462 if (!devmem)
1463 return ERR_PTR(-ENOMEM);
1464
1465 init_completion(&devmem->completion);
1466 devmem->pfn_first = -1UL;
1467 devmem->pfn_last = -1UL;
1468 devmem->resource = res;
1469 devmem->device = device;
1470 devmem->ops = ops;
1471
1472 ret = percpu_ref_init(&devmem->ref, &hmm_devmem_ref_release,
1473 0, GFP_KERNEL);
1474 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001475 return ERR_PTR(ret);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001476
Dan Williams58ef15b2018-12-28 00:35:07 -08001477 ret = devm_add_action_or_reset(device, hmm_devmem_ref_exit,
1478 &devmem->ref);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001479 if (ret)
Dan Williams58ef15b2018-12-28 00:35:07 -08001480 return ERR_PTR(ret);
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001481
1482 devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
1483 devmem->pfn_last = devmem->pfn_first +
1484 (resource_size(devmem->resource) >> PAGE_SHIFT);
Dan Williams063a7d12018-12-28 00:39:46 -08001485 devmem->page_fault = hmm_devmem_fault;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001486
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001487 devmem->pagemap.type = MEMORY_DEVICE_PUBLIC;
1488 devmem->pagemap.res = *devmem->resource;
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001489 devmem->pagemap.page_free = hmm_devmem_free;
1490 devmem->pagemap.altmap_valid = false;
1491 devmem->pagemap.ref = &devmem->ref;
1492 devmem->pagemap.data = devmem;
1493 devmem->pagemap.kill = hmm_devmem_ref_kill;
Dan Williams58ef15b2018-12-28 00:35:07 -08001494
Dan Williamsbbecd94e2018-12-28 00:35:11 -08001495 result = devm_memremap_pages(devmem->device, &devmem->pagemap);
1496 if (IS_ERR(result))
1497 return result;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001498 return devmem;
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001499}
Dan Williams02917e92018-12-28 00:35:15 -08001500EXPORT_SYMBOL_GPL(hmm_devmem_add_resource);
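/*
 * Example usage (illustrative sketch, hypothetical driver code): same
 * registration as above, but against a pre-existing resource (e.g. one
 * described by firmware), which must already carry
 * IORES_DESC_DEVICE_PUBLIC_MEMORY; res is a hypothetical struct resource
 * the driver discovered.
 *
 *	devmem = hmm_devmem_add_resource(&my_devmem_ops, &pdev->dev, res);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 */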
Jérôme Glissed3df0a42017-09-08 16:12:28 -07001501
Jérôme Glisse4ef589d2017-09-08 16:11:58 -07001502/*
Jérôme Glisse858b54d2017-09-08 16:12:02 -07001503 * A device driver that wants to handle multiple devices memory through a
1504 * single fake device can use hmm_device to do so. This is purely a helper
1505 * and it is not needed to make use of any HMM functionality.
1506 */
1507#define HMM_DEVICE_MAX 256
1508
1509static DECLARE_BITMAP(hmm_device_mask, HMM_DEVICE_MAX);
1510static DEFINE_SPINLOCK(hmm_device_lock);
1511static struct class *hmm_device_class;
1512static dev_t hmm_device_devt;
1513
1514static void hmm_device_release(struct device *device)
1515{
1516 struct hmm_device *hmm_device;
1517
1518 hmm_device = container_of(device, struct hmm_device, device);
1519 spin_lock(&hmm_device_lock);
1520 clear_bit(hmm_device->minor, hmm_device_mask);
1521 spin_unlock(&hmm_device_lock);
1522
1523 kfree(hmm_device);
1524}
1525
1526struct hmm_device *hmm_device_new(void *drvdata)
1527{
1528 struct hmm_device *hmm_device;
1529
1530 hmm_device = kzalloc(sizeof(*hmm_device), GFP_KERNEL);
1531 if (!hmm_device)
1532 return ERR_PTR(-ENOMEM);
1533
1534 spin_lock(&hmm_device_lock);
1535 hmm_device->minor = find_first_zero_bit(hmm_device_mask, HMM_DEVICE_MAX);
1536 if (hmm_device->minor >= HMM_DEVICE_MAX) {
1537 spin_unlock(&hmm_device_lock);
1538 kfree(hmm_device);
1539 return ERR_PTR(-EBUSY);
1540 }
1541 set_bit(hmm_device->minor, hmm_device_mask);
1542 spin_unlock(&hmm_device_lock);
1543
1544 dev_set_name(&hmm_device->device, "hmm_device%d", hmm_device->minor);
1545 hmm_device->device.devt = MKDEV(MAJOR(hmm_device_devt),
1546 hmm_device->minor);
1547 hmm_device->device.release = hmm_device_release;
1548 dev_set_drvdata(&hmm_device->device, drvdata);
1549 hmm_device->device.class = hmm_device_class;
1550 device_initialize(&hmm_device->device);
1551
1552 return hmm_device;
1553}
1554EXPORT_SYMBOL(hmm_device_new);
1555
1556void hmm_device_put(struct hmm_device *hmm_device)
1557{
1558 put_device(&hmm_device->device);
1559}
1560EXPORT_SYMBOL(hmm_device_put);
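/*
 * Example usage (illustrative sketch, hypothetical driver code): creating
 * and releasing one fake device to stand in for several physical devices.
 * my_driver_state is hypothetical private data that can later be fetched
 * back with dev_get_drvdata(&hdev->device).
 *
 *	struct hmm_device *hdev;
 *
 *	hdev = hmm_device_new(my_driver_state);
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	...use &hdev->device as the device for hmm_devmem_add()...
 *	hmm_device_put(hdev);
 */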
1561
1562static int __init hmm_init(void)
1563{
1564 int ret;
1565
1566 ret = alloc_chrdev_region(&hmm_device_devt, 0,
1567 HMM_DEVICE_MAX,
1568 "hmm_device");
1569 if (ret)
1570 return ret;
1571
1572 hmm_device_class = class_create(THIS_MODULE, "hmm_device");
1573 if (IS_ERR(hmm_device_class)) {
1574 unregister_chrdev_region(hmm_device_devt, HMM_DEVICE_MAX);
1575 return PTR_ERR(hmm_device_class);
1576 }
1577 return 0;
1578}
1579
1580device_initcall(hmm_init);
Jérôme Glissedf6ad692017-09-08 16:12:24 -07001581#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */