/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is and
 * what it is for. Here we focus on the HMM API description, with some
 * explanation of the underlying implementation.
 *
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 *
 *
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page tables of a process into a device page table. Here, "mirror" means "keep
 * synchronized". Prerequisites: the device must provide the ability to write-
 * protect its page tables (at PAGE_SIZE granularity), and must be able to
 * recover from the resulting potential page faults.
 *
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table (CPU
 * or device) points to no entry, while the other still points to the old page
 * for the address. The latter case happens when the CPU page table update
 * happens first, and then the update is mirrored over to the device page table.
 * This does not cause any issue, because the CPU page table cannot start
 * pointing to a new page until the device page table is invalidated.
 *
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also provides
 * some API calls to help with taking a snapshot of the CPU page table, and to
 * synchronize with any updates that might happen concurrently.
 *
 *
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU it must
 * never be pinned to the device; in other words, any CPU page fault can always
 * cause the device memory to be migrated (copied/moved) back to regular memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>
#include <asm/pgtable.h>

#if IS_ENABLED(CONFIG_HMM)

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>

/*
 * struct hmm - HMM per mm struct
 *
 * @mm: mm struct this HMM struct is bound to
 * @kref: reference count for this HMM struct
 * @lock: lock protecting ranges list
 * @ranges: list of ranges being snapshotted
 * @mirrors: list of mirrors for this mm
 * @mmu_notifier: mmu notifier to track updates to CPU page table
 * @mirrors_sem: read/write semaphore protecting the mirrors list
 * @wq: wait queue for user waiting on a range invalidation
 * @notifiers: count of active mmu notifiers
 * @dead: is the mm dead?
 */
struct hmm {
	struct mm_struct *mm;
	struct kref kref;
	struct mutex lock;
	struct list_head ranges;
	struct list_head mirrors;
	struct mmu_notifier mmu_notifier;
	struct rw_semaphore mirrors_sem;
	wait_queue_head_t wq;
	long notifiers;
	bool dead;
};

/*
 * hmm_pfn_flag_e - HMM flag enums
 *
 * Flags:
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_DEVICE_PRIVATE: private device memory (ZONE_DEVICE)
 *
 * The driver provides a flags array for mapping page protections to device
 * PTE bits. If the driver valid bit for an entry is bit 3,
 * i.e., (entry & (1 << 3)), then the driver must provide
 * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3.
 * The same logic applies to all flags. This is the same idea as vm_page_prot
 * in the vma, except that this is per device driver rather than per architecture.
 */
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
	HMM_PFN_DEVICE_PRIVATE,
	HMM_PFN_FLAG_MAX
};

/*
 * hmm_pfn_value_e - HMM pfn special value
 *
 * Flags:
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
 * be mirrored by a device, because the entry will never have HMM_PFN_VALID
 * set and the pfn value is undefined.
 *
 * The driver provides values for the none entry, error entry, and special
 * entry. The driver can alias (i.e., use the same value for) error and
 * special, but it should not alias none with error or special.
 *
 * HMM pfn value returned by hmm_vma_get_pfns() or hmm_vma_fault() will be:
 * hmm_range.values[HMM_PFN_ERROR] if CPU page table entry is poisoned,
 * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 * hmm_range.values[HMM_PFN_SPECIAL] if CPU page table entry is a special one
 */
enum hmm_pfn_value_e {
	HMM_PFN_ERROR,
	HMM_PFN_NONE,
	HMM_PFN_SPECIAL,
	HMM_PFN_VALUE_MAX
};
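
/*
 * As an illustration only (the MY_DEV_PTE_* bits are hypothetical device page
 * table bits, not part of this API), a driver could describe its encoding to
 * HMM with something like:
 *
 *	static const uint64_t my_dev_flags[HMM_PFN_FLAG_MAX] = {
 *		[HMM_PFN_VALID]          = MY_DEV_PTE_VALID,	// e.g. 1 << 3
 *		[HMM_PFN_WRITE]          = MY_DEV_PTE_WRITE,	// e.g. 1 << 4
 *		[HMM_PFN_DEVICE_PRIVATE] = MY_DEV_PTE_DEVICE,	// e.g. 1 << 5
 *	};
 *
 *	// None/error/special markers must never look like a valid entry;
 *	// error and special may share a value, none must stay distinct.
 *	static const uint64_t my_dev_values[HMM_PFN_VALUE_MAX] = {
 *		[HMM_PFN_ERROR]   = 0x1,
 *		[HMM_PFN_NONE]    = 0x0,
 *		[HMM_PFN_SPECIAL] = 0x2,
 *	};
 *
 *	range->flags = my_dev_flags;
 *	range->values = my_dev_values;
 */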

/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @hmm: the core HMM structure this range is active against
 * @vma: the vm area struct for the range
 * @list: all range locks are on a list
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn values for the special cases (none, special, error, ...)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
 * @page_shift: device virtual address shift value (should be >= PAGE_SHIFT)
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @valid: pfns array did not change since it has been filled by an HMM function
 */
struct hmm_range {
	struct hmm *hmm;
	struct vm_area_struct *vma;
	struct list_head list;
	unsigned long start;
	unsigned long end;
	uint64_t *pfns;
	const uint64_t *flags;
	const uint64_t *values;
	uint64_t default_flags;
	uint64_t pfn_flags_mask;
	uint8_t page_shift;
	uint8_t pfn_shift;
	bool valid;
};

/*
 * hmm_range_page_shift() - return the page shift for the range
 * @range: range being queried
 * Return: page shift (page size = 1 << page shift) for the range
 */
static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
{
	return range->page_shift;
}

/*
 * hmm_range_page_size() - return the page size for the range
 * @range: range being queried
 * Return: page size for the range in bytes
 */
static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
{
	return 1UL << hmm_range_page_shift(range);
}

/*
 * hmm_range_wait_until_valid() - wait for range to be valid
 * @range: range affected by invalidation to wait on
 * @timeout: timeout for wait in ms (i.e., abort wait after that period of time)
 * Return: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
					      unsigned long timeout)
{
	/* Check if the mm is dead. */
	if (range->hmm == NULL || range->hmm->dead || range->hmm->mm == NULL) {
		range->valid = false;
		return false;
	}
	if (range->valid)
		return true;
	wait_event_timeout(range->hmm->wq, range->valid || range->hmm->dead,
			   msecs_to_jiffies(timeout));
	/* Return current valid status just in case we get lucky */
	return range->valid;
}

/*
 * hmm_range_valid() - test if a range is valid or not
 * @range: range
 * Return: true if the range is valid, false otherwise.
 */
static inline bool hmm_range_valid(struct hmm_range *range)
{
	return range->valid;
}

/*
 * hmm_device_entry_to_page() - return struct page pointed to by a device entry
 * @range: range used to decode device entry value
 * @entry: device entry value to get corresponding struct page from
 * Return: struct page pointer if entry is valid, NULL otherwise
 *
 * If the device entry is valid (i.e., valid flag set) then return the struct
 * page matching the entry value. Otherwise return NULL.
 */
static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
						    uint64_t entry)
{
	if (entry == range->values[HMM_PFN_NONE])
		return NULL;
	if (entry == range->values[HMM_PFN_ERROR])
		return NULL;
	if (entry == range->values[HMM_PFN_SPECIAL])
		return NULL;
	if (!(entry & range->flags[HMM_PFN_VALID]))
		return NULL;
	return pfn_to_page(entry >> range->pfn_shift);
}

/*
 * hmm_device_entry_to_pfn() - return pfn value stored in a device entry
 * @range: range used to decode device entry value
 * @entry: device entry to extract pfn from
 * Return: pfn value if device entry is valid, -1UL otherwise
 */
static inline unsigned long
hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
{
	if (pfn == range->values[HMM_PFN_NONE])
		return -1UL;
	if (pfn == range->values[HMM_PFN_ERROR])
		return -1UL;
	if (pfn == range->values[HMM_PFN_SPECIAL])
		return -1UL;
	if (!(pfn & range->flags[HMM_PFN_VALID]))
		return -1UL;
	return (pfn >> range->pfn_shift);
}

/*
 * hmm_device_entry_from_page() - create a valid device entry for a page
 * @range: range used to encode HMM pfn value
 * @page: page for which to create the device entry
 * Return: valid device entry for the page
 */
static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
						  struct page *page)
{
	return (page_to_pfn(page) << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}

/*
 * hmm_device_entry_from_pfn() - create a valid device entry value from a pfn
 * @range: range used to encode HMM pfn value
 * @pfn: pfn value for which to create the device entry
 * Return: valid device entry for the pfn
 */
static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
						 unsigned long pfn)
{
	return (pfn << range->pfn_shift) |
		range->flags[HMM_PFN_VALID];
}
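
/*
 * A minimal, purely illustrative sketch of decoding a filled pfns array
 * (my_dev_map_one() is a hypothetical driver helper, not part of this API):
 *
 *	for (i = 0; i < (range->end - range->start) >> range->page_shift; i++) {
 *		struct page *page = hmm_device_entry_to_page(range, range->pfns[i]);
 *		bool writable = range->pfns[i] & range->flags[HMM_PFN_WRITE];
 *
 *		if (!page)
 *			continue;	// none, error or special entry
 *		my_dev_map_one(dev, range->start + (i << range->page_shift),
 *			       page, writable);
 *	}
 */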

/*
 * Old API:
 * hmm_pfn_to_page()
 * hmm_pfn_to_pfn()
 * hmm_pfn_from_page()
 * hmm_pfn_from_pfn()
 *
 * These are the OLD API; please use the new API instead. They are only kept
 * here to avoid cross-tree merge painfulness, i.e., we convert things to the
 * new API in stages.
 */
static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
					   uint64_t pfn)
{
	return hmm_device_entry_to_page(range, pfn);
}

static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
					   uint64_t pfn)
{
	return hmm_device_entry_to_pfn(range, pfn);
}

static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
					 struct page *page)
{
	return hmm_device_entry_from_page(range, page);
}

static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
					 unsigned long pfn)
{
	return hmm_device_entry_from_pfn(range, pfn);
}

#if IS_ENABLED(CONFIG_HMM_MIRROR)
/*
 * Mirroring: how to synchronize device page table with CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can either
 * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device
 * drivers can decide to register one mirror per device per process, or just
 * one mirror per process for a group of devices. The pattern is:
 *
 *      int device_bind_address_space(..., struct mm_struct *mm, ...)
 *      {
 *          struct device_address_space *das;
 *
 *          // Device driver specific initialization, and allocation of das
 *          // which contains an hmm_mirror struct as one of its fields.
 *          ...
 *
 *          ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops);
 *          if (ret) {
 *              // Cleanup on error
 *              return ret;
 *          }
 *
 *          // Other device driver specific initialization
 *          ...
 *      }
 *
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through sync_cpu_device_pagetables() operation (see
 * hmm_mirror_ops struct).
 *
 * Device driver must not free the struct containing the hmm_mirror struct
 * before calling hmm_mirror_unregister(). The expected usage is to do that when
 * the device driver is unbinding from an address space.
 *
 *
 *      void device_unbind_address_space(struct device_address_space *das)
 *      {
 *          // Device driver specific cleanup
 *          ...
 *
 *          hmm_mirror_unregister(&das->mirror);
 *
 *          // Other device driver specific cleanup, and now das can be freed
 *          ...
 *      }
 */

struct hmm_mirror;

/*
 * enum hmm_update_event - type of update
 * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
 */
enum hmm_update_event {
	HMM_UPDATE_INVALIDATE,
};

/*
 * struct hmm_update - HMM update information for callback
 *
 * @start: virtual start address of the range to update
 * @end: virtual end address of the range to update
 * @event: event triggering the update (what is happening)
 * @blockable: can the callback block/sleep?
 */
struct hmm_update {
	unsigned long start;
	unsigned long end;
	enum hmm_update_event event;
	bool blockable;
};

/*
 * struct hmm_mirror_ops - HMM mirror device operations callback
 *
 * @update: callback to update range on a device
 */
struct hmm_mirror_ops {
	/* release() - release hmm_mirror
	 *
	 * @mirror: pointer to struct hmm_mirror
	 *
	 * This is called when the mm_struct is being released. The callback
	 * must ensure that all access to any pages obtained from this mirror
	 * is halted before the callback returns. All future access should
	 * fault.
	 */
	void (*release)(struct hmm_mirror *mirror);

	/* sync_cpu_device_pagetables() - synchronize page tables
	 *
	 * @mirror: pointer to struct hmm_mirror
	 * @update: update information (see struct hmm_update)
	 * Return: -EAGAIN if update.blockable is false and the callback needs
	 * to block, 0 otherwise.
	 *
	 * This callback ultimately originates from mmu_notifiers when the CPU
	 * page table is updated. The device driver must update its page table
	 * in response to this callback. The update argument tells what action
	 * to perform.
	 *
	 * The device driver must not return from this callback until the device
	 * page tables are completely updated (TLBs flushed, etc); this is a
	 * synchronous call.
	 */
	int (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
					  const struct hmm_update *update);
};
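
/*
 * A rough, illustrative sketch of a driver callback; struct my_device and the
 * my_device_*() helpers are hypothetical, only the control flow matters:
 *
 *	static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *						 const struct hmm_update *update)
 *	{
 *		struct my_device *mdev = container_of(mirror, struct my_device,
 *						      mirror);
 *
 *		if (!update->blockable && my_device_update_would_block(mdev))
 *			return -EAGAIN;
 *
 *		// Invalidate device page table entries and flush device TLBs
 *		// for [update->start, update->end) before returning.
 *		my_device_invalidate_range(mdev, update->start, update->end);
 *		return 0;
 *	}
 */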

/*
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callback for HMM mirror operations
 * @list: for list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register one
 * instance of an hmm_mirror struct with HMM. HMM will track the list of all
 * mirrors for each mm_struct.
 */
struct hmm_mirror {
	struct hmm *hmm;
	const struct hmm_mirror_ops *ops;
	struct list_head list;
};

int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);

/*
 * hmm_mirror_mm_is_alive() - test if mm is still alive
 * @mirror: the HMM mm mirror to check
 * Return: false if the mm is dead, true otherwise
 *
 * This is an optimization; it will not always accurately return false when the
 * mm is dead, i.e., it can still report the mm as alive when the process is
 * being killed but HMM has not yet been informed of that. It is only intended
 * to be used to optimize out cases where the driver is about to do something
 * time consuming and it would be better to skip it if the mm is dead.
 */
static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
{
	struct mm_struct *mm;

	if (!mirror || !mirror->hmm)
		return false;
	mm = READ_ONCE(mirror->hmm->mm);
	if (mirror->hmm->dead || !mm)
		return false;

	return true;
}

/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
int hmm_range_register(struct hmm_range *range,
		       struct mm_struct *mm,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift);
void hmm_range_unregister(struct hmm_range *range);
long hmm_range_snapshot(struct hmm_range *range);
long hmm_range_fault(struct hmm_range *range, bool block);
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block);
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty);
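
/*
 * A condensed sketch of the snapshot pattern described in
 * Documentation/vm/hmm.rst. The driver_lock()/driver_unlock() calls are
 * hypothetical and stand for whatever lock the driver also takes in its
 * sync_cpu_device_pagetables() callback; HMM_RANGE_DEFAULT_TIMEOUT is defined
 * just below:
 *
 *	hmm_range_register(&range, mm, start, end, PAGE_SHIFT);
 *	hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT);
 *
 * again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_snapshot(&range);
 *	if (ret) {
 *		up_read(&mm->mmap_sem);
 *		if (ret == -EAGAIN)
 *			goto again;
 *		hmm_range_unregister(&range);
 *		return ret;
 *	}
 *	driver_lock();
 *	if (!hmm_range_valid(&range)) {
 *		driver_unlock();
 *		up_read(&mm->mmap_sem);
 *		goto again;
 *	}
 *	// ... use range.pfns[] to update the device page table ...
 *	hmm_range_unregister(&range);
 *	driver_unlock();
 *	up_read(&mm->mmap_sem);
 */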

/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms, i.e., 1s, already sounds like a long
 * time to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

/* This is a temporary helper to avoid merge conflict between trees. */
static inline bool hmm_vma_range_done(struct hmm_range *range)
{
	bool ret = hmm_range_valid(range);

	hmm_range_unregister(range);
	return ret;
}

/* This is a temporary helper to avoid merge conflict between trees. */
static inline int hmm_vma_fault(struct hmm_range *range, bool block)
{
	long ret;

	/*
	 * With the old API the driver must set each individual entry with
	 * the requested flags (valid, write, ...). So here we set the mask to
	 * keep intact the entries provided by the driver and zero out the
	 * default_flags.
	 */
	range->default_flags = 0;
	range->pfn_flags_mask = -1UL;

	ret = hmm_range_register(range, range->vma->vm_mm,
				 range->start, range->end,
				 PAGE_SHIFT);
	if (ret)
		return (int)ret;

	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
		/*
		 * The mmap_sem was taken by the driver; we release it here and
		 * return -EAGAIN, which corresponds to the mmap_sem having
		 * been dropped in the old API.
		 */
		up_read(&range->vma->vm_mm->mmap_sem);
		return -EAGAIN;
	}

	ret = hmm_range_fault(range, block);
	if (ret <= 0) {
		if (ret == -EBUSY || !ret) {
			/* Same as above, drop mmap_sem to match old API. */
			up_read(&range->vma->vm_mm->mmap_sem);
			ret = -EBUSY;
		} else if (ret == -EAGAIN)
			ret = -EBUSY;
		hmm_range_unregister(range);
		return ret;
	}
	return 0;
}

/* Below are for HMM internal use only! Not to be used by device driver! */
void hmm_mm_destroy(struct mm_struct *mm);

static inline void hmm_mm_init(struct mm_struct *mm)
{
	mm->hmm = NULL;
}
#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct hmm_devmem;

struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
				       unsigned long addr);

/*
 * struct hmm_devmem_ops - callbacks for ZONE_DEVICE memory events
 *
 * @free: called when the refcount on a page reaches 1, meaning the page is no
 * longer in use
 * @fault: called when there is a CPU page fault on unaddressable memory
 *
 * Both callbacks happen from the page_free() and page_fault() callbacks of
 * struct dev_pagemap respectively. See include/linux/memremap.h for more
 * details on those.
 *
 * The hmm_devmem_ops callbacks are just here to provide a coherent and unique
 * API to device drivers; a device driver should not register its own
 * page_free() or page_fault() but instead rely on the hmm_devmem_ops
 * callbacks.
 */
struct hmm_devmem_ops {
	/*
	 * free() - free a device page
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @page: pointer to struct page being freed
	 *
	 * The callback occurs whenever a device page refcount reaches 1, which
	 * means that no one is holding any reference on the page anymore
	 * (ZONE_DEVICE pages have an elevated refcount of 1 by default so
	 * that they are not released to the general page allocator).
	 *
	 * Note that the callback has exclusive ownership of the page (as no
	 * one is holding any reference).
	 */
	void (*free)(struct hmm_devmem *devmem, struct page *page);
	/*
	 * fault() - CPU page fault or get user page (GUP)
	 * @devmem: device memory structure (see struct hmm_devmem)
	 * @vma: virtual memory area containing the virtual address
	 * @addr: virtual address that faulted or for which there is a GUP
	 * @page: pointer to struct page backing virtual address (unreliable)
	 * @flags: FAULT_FLAG_* (see include/linux/mm.h)
	 * @pmdp: page middle directory
	 * Return: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
	 * on error
	 *
	 * The callback occurs whenever there is a CPU page fault or GUP on a
	 * virtual address. This means that the device driver must migrate the
	 * page back to regular memory (CPU accessible).
	 *
	 * The device driver is free to migrate more than one page from the
	 * fault() callback as an optimization. However if the device decides
	 * to migrate more than one page it must always prioritize the faulting
	 * address over the others.
	 *
	 * The struct page pointer is only given as a hint to allow quick
	 * lookup of internal device driver data. A concurrent migration
	 * might have already freed that page and the virtual address might
	 * no longer be backed by it. So it should not be modified by the
	 * callback.
	 *
	 * Note that the mmap semaphore is held in read mode at least when this
	 * callback occurs, hence the vma is valid upon callback entry.
	 */
	vm_fault_t (*fault)(struct hmm_devmem *devmem,
			    struct vm_area_struct *vma,
			    unsigned long addr,
			    const struct page *page,
			    unsigned int flags,
			    pmd_t *pmdp);
};

/*
 * struct hmm_devmem - track device memory
 *
 * @completion: completion object for device memory
 * @pfn_first: first pfn for this resource (set by hmm_devmem_add())
 * @pfn_last: last pfn for this resource (set by hmm_devmem_add())
 * @resource: IO resource reserved for this chunk of memory
 * @pagemap: device page map for that chunk
 * @device: device to bind resource to
 * @ops: memory operations callback
 * @ref: per CPU refcount
 * @page_fault: callback when CPU fault on an unaddressable device page
 *
 * This is a helper structure for device drivers that do not wish to implement
 * the gory details related to hotplugging new memory and allocating struct
 * pages.
 *
 * Device drivers can directly use ZONE_DEVICE memory on their own if they
 * wish to do so.
 *
 * The page_fault() callback must migrate the page back, from device memory to
 * system memory, so that the CPU can access it. This might fail for various
 * reasons (device issues, device has been unplugged, ...). When such an error
 * condition happens, the page_fault() callback must return VM_FAULT_SIGBUS and
 * set the CPU page table entry to "poisoned".
 *
 * Note that because memory cgroup charges are transferred to the device memory,
 * this should never fail due to memory restrictions. However, allocation
 * of a regular system page might still fail because we are out of memory. If
 * that happens, the page_fault() callback must return VM_FAULT_OOM.
 *
 * The page_fault() callback can also try to migrate back multiple pages in one
 * chunk, as an optimization. It must, however, prioritize the faulting address
 * over all the others.
 */
typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
				       unsigned long addr,
				       const struct page *page,
				       unsigned int flags,
				       pmd_t *pmdp);

struct hmm_devmem {
	struct completion completion;
	unsigned long pfn_first;
	unsigned long pfn_last;
	struct resource *resource;
	struct device *device;
	struct dev_pagemap pagemap;
	const struct hmm_devmem_ops *ops;
	struct percpu_ref ref;
	dev_page_fault_t page_fault;
};

/*
 * To add (hotplug) device memory, HMM assumes that there is no real resource
 * that reserves a range in the physical address space (this is intended to be
 * used by unaddressable device memory). It will reserve a physical range big
 * enough and allocate struct pages for it.
 *
 * The device driver can wrap the hmm_devmem struct inside a private device
 * driver struct.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
				  struct device *device,
				  unsigned long size);
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
					   struct device *device,
					   struct resource *res);
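
/*
 * Illustrative only: a driver that wants HMM to manage its unaddressable
 * memory would typically do something like the following at probe time
 * (my_devmem_* and SIZE are placeholders for driver specifics):
 *
 *	static const struct hmm_devmem_ops my_devmem_ops = {
 *		.free = my_devmem_free,
 *		.fault = my_devmem_fault,
 *	};
 *
 *	devmem = hmm_devmem_add(&my_devmem_ops, &pdev->dev, SIZE);
 *	if (IS_ERR(devmem))
 *		return PTR_ERR(devmem);
 *	// Device pages are in [devmem->pfn_first, devmem->pfn_last].
 */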

/*
 * hmm_devmem_page_set_drvdata - set per-page driver data field
 *
 * @page: pointer to struct page
 * @data: driver data value to set
 *
 * Because the page cannot be on the LRU, we have an unsigned long that the
 * driver can use to store a per-page field. This is just a simple helper to
 * do that.
 */
static inline void hmm_devmem_page_set_drvdata(struct page *page,
					       unsigned long data)
{
	page->hmm_data = data;
}

/*
 * hmm_devmem_page_get_drvdata - get per-page driver data field
 *
 * @page: pointer to struct page
 * Return: driver data value
 */
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
	return page->hmm_data;
}


/*
 * struct hmm_device - fake device to hang device memory onto
 *
 * @device: device struct
 * @minor: device minor number
 */
struct hmm_device {
	struct device device;
	unsigned int minor;
};

/*
 * A device driver that wants to handle multiple devices' memory through a
 * single fake device can use hmm_device to do so. This is purely a helper;
 * it is not strictly needed in order to make use of any HMM functionality.
 */
struct hmm_device *hmm_device_new(void *drvdata);
void hmm_device_put(struct hmm_device *hmm_device);
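
/*
 * For instance (a sketch only), a driver without a convenient struct device
 * to hang its memory onto might do:
 *
 *	hmm_device = hmm_device_new(my_drvdata);	// my_drvdata is driver specific
 *	if (IS_ERR(hmm_device))
 *		return PTR_ERR(hmm_device);
 *	devmem = hmm_devmem_add(&my_devmem_ops, &hmm_device->device, SIZE);
 *	...
 *	hmm_device_put(hmm_device);
 */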
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
#else /* IS_ENABLED(CONFIG_HMM) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM) */

#endif /* LINUX_HMM_H */