// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

static struct mmu_notifier *hmm_alloc_notifier(struct mm_struct *mm)
{
	struct hmm *hmm;

	hmm = kzalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return ERR_PTR(-ENOMEM);

	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->ranges_lock);
	hmm->notifiers = 0;
	return &hmm->mmu_notifier;
}

static void hmm_free_notifier(struct mmu_notifier *mn)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	WARN_ON(!list_empty(&hmm->ranges));
	WARN_ON(!list_empty(&hmm->mirrors));
	kfree(hmm);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;

	/*
	 * Since hmm_range_register() holds the mmget() lock, hmm_release() is
	 * prevented as long as a range exists.
	 */
	WARN_ON(!list_empty_careful(&hmm->ranges));

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		/*
		 * Note: The driver is not allowed to trigger
		 * hmm_mirror_unregister() from this thread.
		 */
		if (mirror->ops->release)
			mirror->ops->release(mirror);
	}
	up_read(&hmm->mirrors_sem);
}

static void notifiers_decrement(struct hmm *hmm)
{
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;
	struct hmm_range *range;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (nrange->end < range->start || nrange->start >= range->end)
			continue;

		range->valid = false;
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int rc;

		rc = mirror->ops->sync_cpu_device_pagetables(mirror, nrange);
		if (rc) {
			if (WARN_ON(mmu_notifier_range_blockable(nrange) ||
				    rc != -EAGAIN))
				continue;
			ret = -EAGAIN;
			break;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	if (ret)
		notifiers_decrement(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	notifiers_decrement(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
	.alloc_notifier		= hmm_alloc_notifier,
	.free_notifier		= hmm_free_notifier,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 *
 * The caller cannot unregister the hmm_mirror while any ranges are
 * registered.
 *
 * Callers using this function must put a call to mmu_notifier_synchronize()
 * in their module exit functions.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	struct mmu_notifier *mn;

	lockdep_assert_held_write(&mm->mmap_sem);

	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mn = mmu_notifier_get_locked(&hmm_mmu_notifier_ops, mm);
	if (IS_ERR(mn))
		return PTR_ERR(mn);
	mirror->hmm = container_of(mn, struct hmm, mmu_notifier);

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
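
/*
 * Illustrative example (not part of the kernel API documentation): a device
 * driver would typically embed the mirror in its own per-process structure
 * and register it while holding mmap_sem for write, as required by the
 * lockdep assertion above.  The my_driver_* names below are hypothetical.
 *
 *	struct my_driver_mm {
 *		struct hmm_mirror mirror;
 *	};
 *
 *	static int my_driver_mm_init(struct my_driver_mm *dmm,
 *				     struct mm_struct *mm)
 *	{
 *		int ret;
 *
 *		dmm->mirror.ops = &my_driver_mirror_ops;
 *		down_write(&mm->mmap_sem);
 *		ret = hmm_mirror_register(&dmm->mirror, mm);
 *		up_write(&mm->mmap_sem);
 *		return ret;
 *	}
 *
 * The matching teardown calls hmm_mirror_unregister(), and the driver's
 * module exit path calls mmu_notifier_synchronize(), as noted above.
 */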

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
	mmu_notifier_put(&hmm->mmu_notifier);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	unsigned int		flags;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	if (!vma)
		goto err;

	if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY)
		flags |= FAULT_FLAG_ALLOW_RETRY;
	if (write_fault)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY) {
		/* Note, handle_mm_fault() did up_read(&mm->mmap_sem) */
		return -EAGAIN;
	}
	if (ret & VM_FAULT_ERROR)
		goto err;

	return -EBUSY;

err:
	*pfn = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not ?
 * @write_fault: write fault ?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;

	if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))
		return -EPERM;

	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can be
	 * used in two ways. In the first, the HMM user coalesces multiple
	 * page faults into one request and sets flags per pfn for those
	 * faults. In the second, the HMM user wants to pre-fault a range
	 * with specific flags. For the latter it is a waste to have the user
	 * pre-fill the pfn arrays with a default flags value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory ? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault ? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
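
/*
 * Illustrative note (not kernel API documentation): the default_flags /
 * pfn_flags_mask pair handled above lets a caller pre-fault a whole range
 * without pre-filling the pfn array. A hypothetical caller asking for every
 * page to be faulted writable could set up its range roughly like this:
 *
 *	range.default_flags = range.flags[HMM_PFN_VALID] |
 *			      range.flags[HMM_PFN_WRITE];
 *	range.pfn_flags_mask = 0;
 *
 * so that hmm_pte_need_fault() computes the same request for every pfn
 * entry regardless of what the array initially contains.
 */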

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			cpu_flags = pte_to_hmm_pfn_flags(range, pte);
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry: ignore migration, use the
		 * device entry, and report anything else as an error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					    swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(walk->mm, pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here: even if some other
		 * thread is splitting the huge pmd, we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that will
	 * not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage get_dev_pagemap() optimization which
		 * will not re-take a reference on a pgmap if we already have
		 * one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking change to CPU page table over a range
 * @range: range
 * @mirror: the mirror struct of the hmm_mirror this range is registered with
 *
 * Return: 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table; see include/linux/hmm.h.
 */
int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;
	unsigned long flags;

	range->valid = false;
	range->hmm = NULL;

	if ((range->start & (PAGE_SIZE - 1)) || (range->end & (PAGE_SIZE - 1)))
		return -EINVAL;
	if (range->start >= range->end)
		return -EINVAL;

	/* Prevent hmm_release() from running while the range is valid */
	if (!mmget_not_zero(hmm->mmu_notifier.mm))
		return -EFAULT;

	/* Initialize range to track CPU page table updates. */
	spin_lock_irqsave(&hmm->ranges_lock, flags);

	range->hmm = hmm;
	list_add(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);

/*
 * hmm_range_unregister() - stop tracking change to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	struct hmm *hmm = range->hmm;
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	list_del_init(&range->list);
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	/* Drop reference taken by hmm_range_register() */
	mmput(hmm->mmu_notifier.mm);

	/*
	 * The range is now invalid and the ref on the hmm is dropped, so
	 * poison the pointer.  Leave other fields in place, for the caller's
	 * use.
	 */
	range->valid = false;
	memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
}
EXPORT_SYMBOL(hmm_range_unregister);
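
/*
 * Illustrative sketch (not a definitive reference): a typical caller pairs
 * the two functions above with hmm_range_fault() below roughly as described
 * in include/linux/hmm.h.  Driver-side locking and the driver_update_device()
 * helper are hypothetical names.
 *
 *	hmm_range_register(&range, mirror);
 *	while (true) {
 *		hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT);
 *		down_read(&mm->mmap_sem);
 *		ret = hmm_range_fault(&range, 0);
 *		if (ret < 0) {
 *			up_read(&mm->mmap_sem);
 *			if (ret == -EBUSY)
 *				continue;
 *			break;
 *		}
 *		take_driver_page_table_lock();
 *		if (!hmm_range_valid(&range)) {
 *			release_driver_page_table_lock();
 *			up_read(&mm->mmap_sem);
 *			continue;
 *		}
 *		driver_update_device(&range);
 *		release_driver_page_table_lock();
 *		up_read(&mm->mmap_sem);
 *		break;
 *	}
 *	hmm_range_unregister(&range);
 */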

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: range being faulted
 * @flags: HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero. On error one of the following status codes
 * can be returned:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EAGAIN:	A page fault needs to be retried and mmap_sem was dropped.
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
| 872 | * -EFAULT: Part of the range has no valid vma, or it is illegal to |
| 873 | * access that range (e.g., a special device vma that HMM |
| 874 | * cannot mirror). |
Jérôme Glisse | 74eee18 | 2017-09-08 16:11:35 -0700 | [diff] [blame] | 875 | * |
| 876 | * This is similar to a regular CPU page fault except that it will not trigger |
Jérôme Glisse | 7323161 | 2019-05-13 17:19:58 -0700 | [diff] [blame] | 877 | * any memory migration if the memory being faulted is not accessible by CPUs |
| 878 | * and the caller does not ask for migration. |
Jérôme Glisse | 74eee18 | 2017-09-08 16:11:35 -0700 | [diff] [blame] | 879 | * |
Jérôme Glisse | ff05c0c | 2018-04-10 16:28:38 -0700 | [diff] [blame] | 880 | * On error, the function marks the HMM pfn entry corresponding to the |
| 881 | * faulting virtual address with an error flag. |
Jérôme Glisse | 74eee18 | 2017-09-08 16:11:35 -0700 | [diff] [blame] | 882 | */ |
Christoph Hellwig | 9a4903e | 2019-07-25 17:56:46 -0700 | [diff] [blame] | 883 | long hmm_range_fault(struct hmm_range *range, unsigned int flags) |
Jérôme Glisse | 74eee18 | 2017-09-08 16:11:35 -0700 | [diff] [blame] | 884 | { |
Jérôme Glisse | 63d5066 | 2019-05-13 17:20:18 -0700 | [diff] [blame] | 885 | const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP; |
Jérôme Glisse | a3e0d41 | 2019-05-13 17:20:01 -0700 | [diff] [blame] | 886 | unsigned long start = range->start, end; |
Jérôme Glisse | 74eee18 | 2017-09-08 16:11:35 -0700 | [diff] [blame] | 887 | struct hmm_vma_walk hmm_vma_walk; |
Jérôme Glisse | a3e0d41 | 2019-05-13 17:20:01 -0700 | [diff] [blame] | 888 | struct hmm *hmm = range->hmm; |
| 889 | struct vm_area_struct *vma; |
Jérôme Glisse | 74eee18 | 2017-09-08 16:11:35 -0700 | [diff] [blame] | 890 | struct mm_walk mm_walk; |
Jérôme Glisse | 74eee18 | 2017-09-08 16:11:35 -0700 | [diff] [blame] | 891 | int ret; |
| 892 | |
Jason Gunthorpe | c7d8b78 | 2019-08-06 20:15:42 -0300 | [diff] [blame] | 893 | lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem); |
Jérôme Glisse | 74eee18 | 2017-09-08 16:11:35 -0700 | [diff] [blame] | 894 | |
| 895 | do { |
Jérôme Glisse | a3e0d41 | 2019-05-13 17:20:01 -0700 | [diff] [blame] | 896 | /* If the range is no longer valid, force a retry. */ |
Christoph Hellwig | 2bcbeae | 2019-07-24 08:52:52 +0200 | [diff] [blame] | 897 | if (!range->valid) |
| 898 | return -EBUSY; |
Jérôme Glisse | 74eee18 | 2017-09-08 16:11:35 -0700 | [diff] [blame] | 899 | |
Jason Gunthorpe | c7d8b78 | 2019-08-06 20:15:42 -0300 | [diff] [blame] | 900 | vma = find_vma(hmm->mmu_notifier.mm, start); |
Jérôme Glisse | 63d5066 | 2019-05-13 17:20:18 -0700 | [diff] [blame] | 901 | if (vma == NULL || (vma->vm_flags & device_vma)) |
Jérôme Glisse | a3e0d41 | 2019-05-13 17:20:01 -0700 | [diff] [blame] | 902 | return -EFAULT; |
Jérôme Glisse | 74eee18 | 2017-09-08 16:11:35 -0700 | [diff] [blame] | 903 | |
Jérôme Glisse | a3e0d41 | 2019-05-13 17:20:01 -0700 | [diff] [blame] | 904 | if (!(vma->vm_flags & VM_READ)) { |
| 905 | /* |
| 906 | * If the vma does not allow read access, then assume that it |
| 907 | * does not allow write access either. HMM does not support |
| 908 | * architectures that allow write without read access. |
| 909 | */ |
| 910 | hmm_pfns_clear(range, range->pfns, |
| 911 | range->start, range->end); |
| 912 | return -EPERM; |
| 913 | } |
| 914 | |
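| | /* |
| |  * Descriptive note: the walk below dispatches to the HMM pud/pmd/ |
| |  * hole/hugetlb callbacks, which fill range->pfns[] and record the |
| |  * last address handled in hmm_vma_walk.last for the retry loop. |
| |  */ |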
Jérôme Glisse | 992de9a | 2019-05-13 17:20:21 -0700 | [diff] [blame] | 915 | hmm_vma_walk.pgmap = NULL; |
Jérôme Glisse | a3e0d41 | 2019-05-13 17:20:01 -0700 | [diff] [blame] | 916 | hmm_vma_walk.last = start; |
Christoph Hellwig | 9a4903e | 2019-07-25 17:56:46 -0700 | [diff] [blame] | 917 | hmm_vma_walk.flags = flags; |
Jérôme Glisse | a3e0d41 | 2019-05-13 17:20:01 -0700 | [diff] [blame] | 918 | hmm_vma_walk.range = range; |
| 919 | mm_walk.private = &hmm_vma_walk; |
| 920 | end = min(range->end, vma->vm_end); |
| 921 | |
| 922 | mm_walk.vma = vma; |
| 923 | mm_walk.mm = vma->vm_mm; |
| 924 | mm_walk.pte_entry = NULL; |
| 925 | mm_walk.test_walk = NULL; |
Jérôme Glisse | 992de9a | 2019-05-13 17:20:21 -0700 | [diff] [blame] | 927 | mm_walk.pud_entry = hmm_vma_walk_pud; |
Jérôme Glisse | a3e0d41 | 2019-05-13 17:20:01 -0700 | [diff] [blame] | 928 | mm_walk.pmd_entry = hmm_vma_walk_pmd; |
| 929 | mm_walk.pte_hole = hmm_vma_walk_hole; |
Jérôme Glisse | 63d5066 | 2019-05-13 17:20:18 -0700 | [diff] [blame] | 930 | mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry; |
Jérôme Glisse | a3e0d41 | 2019-05-13 17:20:01 -0700 | [diff] [blame] | 931 | |
| 932 | do { |
| 933 | ret = walk_page_range(start, end, &mm_walk); |
| 934 | start = hmm_vma_walk.last; |
| 935 | |
| 936 | /* Keep trying while the range is valid. */ |
| 937 | } while (ret == -EBUSY && range->valid); |
| 938 | |
| 939 | if (ret) { |
| 940 | unsigned long i; |
| 941 | |
| 942 | i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; |
| 943 | hmm_pfns_clear(range, &range->pfns[i], |
| 944 | hmm_vma_walk.last, range->end); |
| 945 | return ret; |
| 946 | } |
| 947 | start = end; |
| 948 | |
| 949 | } while (start < range->end); |
Jérôme Glisse | 704f3f2 | 2019-05-13 17:19:48 -0700 | [diff] [blame] | 950 | |
Jérôme Glisse | 7323161 | 2019-05-13 17:19:58 -0700 | [diff] [blame] | 951 | return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT; |
Jérôme Glisse | 74eee18 | 2017-09-08 16:11:35 -0700 | [diff] [blame] | 952 | } |
Jérôme Glisse | 7323161 | 2019-05-13 17:19:58 -0700 | [diff] [blame] | 953 | EXPORT_SYMBOL(hmm_range_fault); |
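| | /* |
| |  * Caller-side sketch (illustrative only; "driver_lock" and "mm" are |
| |  * hypothetical, see Documentation/vm/hmm.rst for the full pattern). A |
| |  * typical user retries on -EBUSY and re-checks range.valid under its |
| |  * own lock before committing the result to device page tables: |
| |  * |
| |  * again: |
| |  *	down_read(&mm->mmap_sem); |
| |  *	ret = hmm_range_fault(&range, 0); |
| |  *	if (ret < 0) { |
| |  *		up_read(&mm->mmap_sem); |
| |  *		if (ret == -EBUSY) |
| |  *			goto again; |
| |  *		return ret; |
| |  *	} |
| |  *	take_lock(driver_lock); |
| |  *	if (!range.valid) { |
| |  *		release_lock(driver_lock); |
| |  *		up_read(&mm->mmap_sem); |
| |  *		goto again; |
| |  *	} |
| |  *	... program device page tables from range.pfns[] ... |
| |  *	release_lock(driver_lock); |
| |  *	up_read(&mm->mmap_sem); |
| |  */ |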
Jérôme Glisse | 55c0ece | 2019-05-13 17:20:28 -0700 | [diff] [blame] | 954 | |
| 955 | /** |
Christoph Hellwig | 9a4903e | 2019-07-25 17:56:46 -0700 | [diff] [blame] | 956 | * hmm_range_dma_map() - hmm_range_fault() and dma map pages all in one. |
| 957 | * @range: range being faulted |
| 958 | * @device: device to map page to |
| 959 | * @daddrs: array of dma addresses for the mapped pages |
| 960 | * @flags: HMM_FAULT_* |
Jérôme Glisse | 55c0ece | 2019-05-13 17:20:28 -0700 | [diff] [blame] | 961 | * |
Christoph Hellwig | 9a4903e | 2019-07-25 17:56:46 -0700 | [diff] [blame] | 962 | * Return: the number of pages mapped on success (including zero), or any |
| 963 | * status return from hmm_range_fault() otherwise. |
Jérôme Glisse | 55c0ece | 2019-05-13 17:20:28 -0700 | [diff] [blame] | 964 | */ |
Christoph Hellwig | 9a4903e | 2019-07-25 17:56:46 -0700 | [diff] [blame] | 965 | long hmm_range_dma_map(struct hmm_range *range, struct device *device, |
| 966 | dma_addr_t *daddrs, unsigned int flags) |
Jérôme Glisse | 55c0ece | 2019-05-13 17:20:28 -0700 | [diff] [blame] | 967 | { |
| 968 | unsigned long i, npages, mapped; |
| 969 | long ret; |
| 970 | |
Christoph Hellwig | 9a4903e | 2019-07-25 17:56:46 -0700 | [diff] [blame] | 971 | ret = hmm_range_fault(range, flags); |
Jérôme Glisse | 55c0ece | 2019-05-13 17:20:28 -0700 | [diff] [blame] | 972 | if (ret <= 0) |
| 973 | return ret ? ret : -EBUSY; |
| 974 | |
| 975 | npages = (range->end - range->start) >> PAGE_SHIFT; |
| 976 | for (i = 0, mapped = 0; i < npages; ++i) { |
| 977 | enum dma_data_direction dir = DMA_TO_DEVICE; |
| 978 | struct page *page; |
| 979 | |
| 980 | /* |
| 981 | * FIXME: the DMA API needs to provide an invalid DMA address |
| 982 | * value instead of a function to test the dma address value. This |
| 983 | * would remove a lot of dumb code duplicated across many arches. |
| 984 | * |
| 985 | * For now setting it to 0 here is good enough as the pfns[] |
| 986 | * value is what is used to check what is valid and what isn't. |
| 987 | */ |
| 988 | daddrs[i] = 0; |
| 989 | |
Jérôme Glisse | 391aab1 | 2019-05-13 17:20:31 -0700 | [diff] [blame] | 990 | page = hmm_device_entry_to_page(range, range->pfns[i]); |
Jérôme Glisse | 55c0ece | 2019-05-13 17:20:28 -0700 | [diff] [blame] | 991 | if (page == NULL) |
| 992 | continue; |
| 993 | |
| 994 | /* Check if range is being invalidated */ |
| 995 | if (!range->valid) { |
| 996 | ret = -EBUSY; |
| 997 | goto unmap; |
| 998 | } |
| 999 | |
| 1000 | /* If the page is readable and writable, map it bi-directionally. */ |
| 1001 | if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) |
| 1002 | dir = DMA_BIDIRECTIONAL; |
| 1003 | |
| 1004 | daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir); |
| 1005 | if (dma_mapping_error(device, daddrs[i])) { |
| 1006 | ret = -EFAULT; |
| 1007 | goto unmap; |
| 1008 | } |
| 1009 | |
| 1010 | mapped++; |
| 1011 | } |
| 1012 | |
| 1013 | return mapped; |
| 1014 | |
| 1015 | unmap: |
| 1016 | for (npages = i, i = 0; (i < npages) && mapped; ++i) { |
| 1017 | enum dma_data_direction dir = DMA_TO_DEVICE; |
| 1018 | struct page *page; |
| 1019 | |
Jérôme Glisse | 391aab1 | 2019-05-13 17:20:31 -0700 | [diff] [blame] | 1020 | page = hmm_device_entry_to_page(range, range->pfns[i]); |
Jérôme Glisse | 55c0ece | 2019-05-13 17:20:28 -0700 | [diff] [blame] | 1021 | if (page == NULL) |
| 1022 | continue; |
| 1023 | |
| 1024 | if (dma_mapping_error(device, daddrs[i])) |
| 1025 | continue; |
| 1026 | |
| 1027 | /* If the page is readable and writable, it was mapped bi-directionally. */ |
| 1028 | if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) |
| 1029 | dir = DMA_BIDIRECTIONAL; |
| 1030 | |
| 1031 | dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir); |
| 1032 | mapped--; |
| 1033 | } |
| 1034 | |
| 1035 | return ret; |
| 1036 | } |
| 1037 | EXPORT_SYMBOL(hmm_range_dma_map); |
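| | /* |
| |  * Illustrative sketch (hypothetical names; assumes the same locking rules |
| |  * as hmm_range_fault()): daddrs needs one slot per page in the range, and |
| |  * a successful map is later undone with hmm_range_dma_unmap(): |
| |  * |
| |  *	npages = (range.end - range.start) >> PAGE_SHIFT; |
| |  *	daddrs = kmalloc_array(npages, sizeof(*daddrs), GFP_KERNEL); |
| |  *	mapped = hmm_range_dma_map(&range, dev, daddrs, 0); |
| |  *	if (mapped < 0) |
| |  *		goto err; |
| |  *	... program the device using daddrs[] ... |
| |  *	hmm_range_dma_unmap(&range, dev, daddrs, true); |
| |  */ |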
| 1038 | |
| 1039 | /** |
| 1040 | * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map() |
| 1041 | * @range: range being unmapped |
Jérôme Glisse | 55c0ece | 2019-05-13 17:20:28 -0700 | [diff] [blame] | 1042 | * @device: device against which dma map was done |
| 1043 | * @daddrs: dma address of mapped pages |
| 1044 | * @dirty: dirty pages that had the write flag set |
Ralph Campbell | 085ea25 | 2019-05-06 16:29:39 -0700 | [diff] [blame] | 1045 | * Return: number of pages unmapped on success, -EINVAL otherwise |
Jérôme Glisse | 55c0ece | 2019-05-13 17:20:28 -0700 | [diff] [blame] | 1046 | * |
| 1047 | * Note that the caller MUST abide by the mmu notifier rules, or use an HMM |
| 1048 | * mirror and honor the sync_cpu_device_pagetables() callback, so that it is safe |
| 1049 | * here to call set_page_dirty(). The caller must also take appropriate locks to |
| 1050 | * keep any concurrent mmu notifier or sync_cpu_device_pagetables() from making progress. |
| 1051 | */ |
| 1052 | long hmm_range_dma_unmap(struct hmm_range *range, |
Jérôme Glisse | 55c0ece | 2019-05-13 17:20:28 -0700 | [diff] [blame] | 1053 | struct device *device, |
| 1054 | dma_addr_t *daddrs, |
| 1055 | bool dirty) |
| 1056 | { |
| 1057 | unsigned long i, npages; |
| 1058 | long cpages = 0; |
| 1059 | |
| 1060 | /* Sanity check. */ |
| 1061 | if (range->end <= range->start) |
| 1062 | return -EINVAL; |
| 1063 | if (!daddrs) |
| 1064 | return -EINVAL; |
| 1065 | if (!range->pfns) |
| 1066 | return -EINVAL; |
| 1067 | |
| 1068 | npages = (range->end - range->start) >> PAGE_SHIFT; |
| 1069 | for (i = 0; i < npages; ++i) { |
| 1070 | enum dma_data_direction dir = DMA_TO_DEVICE; |
| 1071 | struct page *page; |
| 1072 | |
Jérôme Glisse | 391aab1 | 2019-05-13 17:20:31 -0700 | [diff] [blame] | 1073 | page = hmm_device_entry_to_page(range, range->pfns[i]); |
Jérôme Glisse | 55c0ece | 2019-05-13 17:20:28 -0700 | [diff] [blame] | 1074 | if (page == NULL) |
| 1075 | continue; |
| 1076 | |
| 1077 | /* If the page is readable and writable, it was mapped bi-directionally. */ |
| 1078 | if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) { |
| 1079 | dir = DMA_BIDIRECTIONAL; |
| 1080 | |
| 1081 | /* |
| 1082 | * See the comment in the function description on why it is |
| 1083 | * safe to call set_page_dirty() here. |
| 1084 | */ |
| 1085 | if (dirty) |
| 1086 | set_page_dirty(page); |
| 1087 | } |
| 1088 | |
| 1089 | /* Unmap and clear pfns/dma address */ |
| 1090 | dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir); |
| 1091 | range->pfns[i] = range->values[HMM_PFN_NONE]; |
| 1092 | /* FIXME see the comment in hmm_range_dma_map() */ |
| 1093 | daddrs[i] = 0; |
| 1094 | cpages++; |
| 1095 | } |
| 1096 | |
| 1097 | return cpages; |
| 1098 | } |
| 1099 | EXPORT_SYMBOL(hmm_range_dma_unmap); |
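| | /* |
| |  * Illustrative note (hypothetical "driver_lock"): a caller that passes |
| |  * dirty == true is expected to hold the lock it uses to serialize its |
| |  * mmu notifier / sync_cpu_device_pagetables() callback, so that the |
| |  * set_page_dirty() call above cannot race with an invalidation: |
| |  * |
| |  *	take_lock(driver_lock); |
| |  *	hmm_range_dma_unmap(&range, dev, daddrs, true); |
| |  *	release_lock(driver_lock); |
| |  */ |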