/*
 * Copyright 2013 Red Hat Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Heterogeneous Memory Management (HMM)
 *
 * See Documentation/vm/hmm.txt for reasons and overview of what HMM is and
 * what it is for. Here we focus on the HMM API description, with some
 * explanation of the underlying implementation.
 *
 * Short description: HMM provides a set of helpers to share a virtual address
 * space between CPU and a device, so that the device can access any valid
 * address of the process (while still obeying memory protection). HMM also
 * provides helpers to migrate process memory to device memory, and back. Each
 * set of functionality (address space mirroring, and migration to and from
 * device memory) can be used independently of the other.
 *
 *
 * HMM address space mirroring API:
 *
 * Use HMM address space mirroring if you want to mirror a range of the CPU
 * page table of a process into a device page table. Here, "mirror" means
 * "keep synchronized". Prerequisites: the device must provide the ability to
 * write-protect its page tables (at PAGE_SIZE granularity), and must be able
 * to recover from the resulting potential page faults.
 *
 * HMM guarantees that at any point in time, a given virtual address points to
 * either the same memory in both CPU and device page tables (that is: CPU and
 * device page tables each point to the same pages), or that one page table
 * (CPU or device) points to no entry, while the other still points to the old
 * page for the address. The latter case happens when the CPU page table
 * update happens first, and then the update is mirrored over to the device
 * page table. This does not cause any issue, because the CPU page table
 * cannot start pointing to a new page until the device page table is
 * invalidated.
 *
 * HMM uses mmu_notifiers to monitor the CPU page tables, and forwards any
 * updates to each device driver that has registered a mirror. It also
 * provides some API calls to help with taking a snapshot of the CPU page
 * table, and to synchronize with any updates that might happen concurrently.
 *
 *
 * HMM migration to and from device memory:
 *
 * HMM provides a set of helpers to hotplug device memory as ZONE_DEVICE, with
 * a new MEMORY_DEVICE_PRIVATE type. This provides a struct page for each page
 * of the device memory, and allows the device driver to manage its memory
 * using those struct pages. Having struct pages for device memory makes
 * migration easier. Because that memory is not addressable by the CPU it must
 * never be pinned to the device; in other words, any CPU page fault can
 * always cause the device memory to be migrated (copied/moved) back to
 * regular memory.
 *
 * A new migrate helper (migrate_vma()) has been added (see mm/migrate.c) that
 * allows use of a device DMA engine to perform the copy operation between
 * regular system memory and device memory.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_HMM)

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>

struct hmm;

/*
 * hmm_pfn_t - HMM uses its own pfn type to keep several flags per page
 *
 * Flags:
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_EMPTY: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 *      result of vm_insert_pfn() or vm_insert_page(). Therefore, it should not
 *      be mirrored by a device, because the entry will never have HMM_PFN_VALID
 *      set and the pfn value is undefined.
 * HMM_PFN_DEVICE_UNADDRESSABLE: unaddressable device memory (ZONE_DEVICE)
 */
typedef unsigned long hmm_pfn_t;

#define HMM_PFN_VALID (1 << 0)
#define HMM_PFN_WRITE (1 << 1)
#define HMM_PFN_ERROR (1 << 2)
#define HMM_PFN_EMPTY (1 << 3)
#define HMM_PFN_SPECIAL (1 << 4)
#define HMM_PFN_DEVICE_UNADDRESSABLE (1 << 5)
#define HMM_PFN_SHIFT 6

/*
 * hmm_pfn_t_to_page() - return struct page pointed to by a valid hmm_pfn_t
 * @pfn: hmm_pfn_t to convert to struct page
 * Returns: struct page pointer if pfn is a valid hmm_pfn_t, NULL otherwise
 *
 * If the hmm_pfn_t is valid (i.e., the valid flag is set) then return the
 * struct page matching the pfn value stored in the hmm_pfn_t. Otherwise
 * return NULL.
 */
static inline struct page *hmm_pfn_t_to_page(hmm_pfn_t pfn)
{
        if (!(pfn & HMM_PFN_VALID))
                return NULL;
        return pfn_to_page(pfn >> HMM_PFN_SHIFT);
}

/*
 * hmm_pfn_t_to_pfn() - return the pfn value stored in an hmm_pfn_t
 * @pfn: hmm_pfn_t to extract pfn from
 * Returns: pfn value if hmm_pfn_t is valid, -1UL otherwise
 */
static inline unsigned long hmm_pfn_t_to_pfn(hmm_pfn_t pfn)
{
        if (!(pfn & HMM_PFN_VALID))
                return -1UL;
        return (pfn >> HMM_PFN_SHIFT);
}

/*
 * hmm_pfn_t_from_page() - create a valid hmm_pfn_t value from struct page
 * @page: struct page pointer for which to create the hmm_pfn_t
 * Returns: valid hmm_pfn_t for the page
 */
static inline hmm_pfn_t hmm_pfn_t_from_page(struct page *page)
{
        return (page_to_pfn(page) << HMM_PFN_SHIFT) | HMM_PFN_VALID;
}

/*
 * hmm_pfn_t_from_pfn() - create a valid hmm_pfn_t value from pfn
 * @pfn: pfn value for which to create the hmm_pfn_t
 * Returns: valid hmm_pfn_t for the pfn
 */
static inline hmm_pfn_t hmm_pfn_t_from_pfn(unsigned long pfn)
{
        return (pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID;
}

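/*
 * Example: a minimal sketch of encoding and decoding an hmm_pfn_t with the
 * helpers above. Only the surrounding driver function is hypothetical.
 *
 *      void driver_example(struct page *page)
 *      {
 *          hmm_pfn_t entry;
 *
 *          // Encode: pfn shifted by HMM_PFN_SHIFT, with HMM_PFN_VALID set;
 *          // extra permission flags can be OR-ed in.
 *          entry = hmm_pfn_t_from_page(page) | HMM_PFN_WRITE;
 *
 *          // Decode: both helpers check HMM_PFN_VALID first.
 *          BUG_ON(hmm_pfn_t_to_page(entry) != page);
 *          BUG_ON(hmm_pfn_t_to_pfn(entry) != page_to_pfn(page));
 *      }
 */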

#if IS_ENABLED(CONFIG_HMM_MIRROR)
/*
 * Mirroring: how to synchronize device page table with CPU page table.
 *
 * A device driver that is participating in HMM mirroring must always
 * synchronize with CPU page table updates. For this, device drivers can
 * either directly use mmu_notifier APIs or they can use the hmm_mirror API.
 * Device drivers can decide to register one mirror per device per process, or
 * just one mirror per process for a group of devices. The pattern is:
 *
 *      int device_bind_address_space(..., struct mm_struct *mm, ...)
 *      {
 *          struct device_address_space *das;
 *          int ret;
 *
 *          // Device driver specific initialization, and allocation of das
 *          // which contains an hmm_mirror struct as one of its fields.
 *          ...
 *
 *          das->mirror.ops = &device_mirror_ops;
 *          ret = hmm_mirror_register(&das->mirror, mm);
 *          if (ret) {
 *              // Cleanup on error
 *              return ret;
 *          }
 *
 *          // Other device driver specific initialization
 *          ...
 *      }
 *
 * Once an hmm_mirror is registered for an address space, the device driver
 * will get callbacks through the sync_cpu_device_pagetables() operation (see
 * the hmm_mirror_ops struct).
 *
 * The device driver must not free the struct containing the hmm_mirror struct
 * before calling hmm_mirror_unregister(). The expected usage is to do that
 * when the device driver is unbinding from an address space.
 *
 *
 *      void device_unbind_address_space(struct device_address_space *das)
 *      {
 *          // Device driver specific cleanup
 *          ...
 *
 *          hmm_mirror_unregister(&das->mirror);
 *
 *          // Other device driver specific cleanup, and now das can be freed
 *          ...
 *      }
 */

struct hmm_mirror;

/*
 * enum hmm_update_type - type of update
 * @HMM_UPDATE_INVALIDATE: invalidate range (no indication as to why)
 */
enum hmm_update_type {
        HMM_UPDATE_INVALIDATE,
};

/*
 * struct hmm_mirror_ops - HMM mirror device operations callbacks
 *
 * @release: callback for when the mm_struct being mirrored is released
 * @sync_cpu_device_pagetables: callback to update a range in the device
 *      page table
 */
struct hmm_mirror_ops {
        /* release() - release hmm_mirror
         *
         * @mirror: pointer to struct hmm_mirror
         *
         * This is called when the mm_struct is being released.
         * The callback should make sure no references to the mirror occur
         * after the callback returns.
         */
        void (*release)(struct hmm_mirror *mirror);

        /* sync_cpu_device_pagetables() - synchronize page tables
         *
         * @mirror: pointer to struct hmm_mirror
         * @update_type: type of update that occurred to the CPU page table
         * @start: virtual start address of the range to update
         * @end: virtual end address of the range to update
         *
         * This callback ultimately originates from mmu_notifiers when the CPU
         * page table is updated. The device driver must update its page table
         * in response to this callback. The update_type argument tells what
         * action to perform.
         *
         * The device driver must not return from this callback until the
         * device page tables are completely updated (TLBs flushed, etc); this
         * is a synchronous call.
         */
        void (*sync_cpu_device_pagetables)(struct hmm_mirror *mirror,
                                           enum hmm_update_type update_type,
                                           unsigned long start,
                                           unsigned long end);
};
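
/*
 * Example: a minimal sketch of a sync_cpu_device_pagetables() callback. The
 * device_invalidate_range() helper and the dev_pagetable_lock are
 * hypothetical driver internals; the point is that the same lock also
 * serializes the snapshot path (see hmm_vma_range_done() below).
 *
 *      static void driver_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
 *                                                    enum hmm_update_type update_type,
 *                                                    unsigned long start,
 *                                                    unsigned long end)
 *      {
 *          struct device_address_space *das;
 *
 *          das = container_of(mirror, struct device_address_space, mirror);
 *
 *          // Same lock as the one serializing device page table updates
 *          mutex_lock(&das->dev_pagetable_lock);
 *          // Must not return before device page tables are updated and
 *          // device TLBs are flushed; this callback is synchronous.
 *          device_invalidate_range(das, start, end);
 *          mutex_unlock(&das->dev_pagetable_lock);
 *      }
 */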

/*
 * struct hmm_mirror - mirror struct for a device driver
 *
 * @hmm: pointer to struct hmm (which is unique per mm_struct)
 * @ops: device driver callback for HMM mirror operations
 * @list: for list of mirrors of a given mm
 *
 * Each address space (mm_struct) being mirrored by a device must register one
 * instance of an hmm_mirror struct with HMM. HMM will track the list of all
 * mirrors for each mm_struct.
 */
struct hmm_mirror {
        struct hmm *hmm;
        const struct hmm_mirror_ops *ops;
        struct list_head list;
};

int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);


/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @vma: the vm area struct for the range
 * @list: all range locks are on a list
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @valid: pfns array did not change since it was filled by an HMM function
 */
struct hmm_range {
        struct vm_area_struct *vma;
        struct list_head list;
        unsigned long start;
        unsigned long end;
        hmm_pfn_t *pfns;
        bool valid;
};

/*
 * To snapshot the CPU page table, call hmm_vma_get_pfns(), then take a device
 * driver lock that serializes device page table updates, then call
 * hmm_vma_range_done(), to check if the snapshot is still valid. The same
 * device driver page table update lock must also be used in the
 * hmm_mirror_ops.sync_cpu_device_pagetables() callback, so that CPU page
 * table invalidation serializes on it.
 *
 * YOU MUST CALL hmm_vma_range_done() ONCE AND ONLY ONCE EACH TIME YOU CALL
 * hmm_vma_get_pfns() WITHOUT ERROR!
 *
 * IF YOU DO NOT FOLLOW THE ABOVE RULE THE SNAPSHOT CONTENT MIGHT BE INVALID!
 */
int hmm_vma_get_pfns(struct hmm_range *range);
bool hmm_vma_range_done(struct hmm_range *range);
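
/*
 * Example: a minimal sketch of the snapshot pattern described above. The
 * device_populate_pagetable() helper and dev_pagetable_lock are hypothetical;
 * the ordering (snapshot, take the driver lock, then hmm_vma_range_done())
 * is the part that matters.
 *
 *      static int driver_update_range(struct device_address_space *das,
 *                                     struct hmm_range *range)
 *      {
 *          int ret;
 *
 *          ret = hmm_vma_get_pfns(range);
 *          if (ret)
 *              return ret;
 *
 *          mutex_lock(&das->dev_pagetable_lock);
 *          if (!hmm_vma_range_done(range)) {
 *              // Snapshot was invalidated concurrently; caller retries.
 *              mutex_unlock(&das->dev_pagetable_lock);
 *              return -EAGAIN;
 *          }
 *          device_populate_pagetable(das, range->pfns, range->start, range->end);
 *          mutex_unlock(&das->dev_pagetable_lock);
 *          return 0;
 *      }
 */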


/*
 * Fault memory on behalf of device driver. Unlike handle_mm_fault(), this
 * will not migrate any device memory back to system memory. The hmm_pfn_t
 * array will be updated with the fault result and current snapshot of the
 * CPU page table for the range.
 *
 * The mmap_sem must be taken in read mode before entering and it might be
 * dropped by the function if the block argument is false. In that case, the
 * function returns -EAGAIN.
 *
 * Return value does not reflect if the fault was successful for every single
 * address or not. Therefore, the caller must inspect the hmm_pfn_t array to
 * determine fault status for each address.
 *
 * Trying to fault inside an invalid vma will result in -EINVAL.
 *
 * See the function description in mm/hmm.c for further documentation.
 */
int hmm_vma_fault(struct hmm_range *range, bool write, bool block);
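
/*
 * Example: a minimal sketch of faulting a range on behalf of a device,
 * reusing the hypothetical driver lock from the snapshot example above. A
 * successful hmm_vma_fault() is paired with hmm_vma_range_done() the same
 * way hmm_vma_get_pfns() is.
 *
 *      static int driver_fault_range(struct device_address_space *das,
 *                                    struct hmm_range *range, bool write)
 *      {
 *          int ret;
 *
 *          down_read(&das->mm->mmap_sem);
 *          ret = hmm_vma_fault(range, write, false);
 *          if (ret == -EAGAIN)
 *              return ret; // hmm_vma_fault() dropped mmap_sem for us
 *          up_read(&das->mm->mmap_sem);
 *          if (ret)
 *              return ret;
 *
 *          mutex_lock(&das->dev_pagetable_lock);
 *          if (!hmm_vma_range_done(range)) {
 *              mutex_unlock(&das->dev_pagetable_lock);
 *              return -EAGAIN;
 *          }
 *          // Inspect range->pfns per address: some entries may hold
 *          // HMM_PFN_ERROR even when the call as a whole succeeded.
 *          device_populate_pagetable(das, range->pfns, range->start, range->end);
 *          mutex_unlock(&das->dev_pagetable_lock);
 *          return 0;
 *      }
 */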
#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */


#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
struct hmm_devmem;

struct page *hmm_vma_alloc_locked_page(struct vm_area_struct *vma,
                                       unsigned long addr);

/*
 * struct hmm_devmem_ops - callbacks for ZONE_DEVICE memory events
 *
 * @free: called when the page refcount reaches 1, meaning the page is no
 *      longer in use
 * @fault: called when there is a page fault to unaddressable memory
 *
 * Both callbacks happen via the page_free() and page_fault() callbacks of
 * struct dev_pagemap, respectively. See include/linux/memremap.h for more
 * details on those.
 *
 * The hmm_devmem_ops callbacks are here to provide a coherent and unified
 * API to device drivers; a device driver should not register its own
 * page_free() or page_fault() but instead rely on the hmm_devmem_ops
 * callbacks.
 */
struct hmm_devmem_ops {
        /*
         * free() - free a device page
         * @devmem: device memory structure (see struct hmm_devmem)
         * @page: pointer to struct page being freed
         *
         * The callback occurs whenever a device page's refcount reaches 1,
         * which means that no one is holding any reference on the page
         * anymore (ZONE_DEVICE pages have an elevated refcount of 1 by
         * default, so that they are not released to the general page
         * allocator).
         *
         * Note that the callback has exclusive ownership of the page (as no
         * one is holding any reference).
         */
        void (*free)(struct hmm_devmem *devmem, struct page *page);
        /*
         * fault() - CPU page fault or get user page (GUP)
         * @devmem: device memory structure (see struct hmm_devmem)
         * @vma: virtual memory area containing the virtual address
         * @addr: virtual address that faulted or for which there is a GUP
         * @page: pointer to struct page backing virtual address (unreliable)
         * @flags: FAULT_FLAG_* (see include/linux/mm.h)
         * @pmdp: page middle directory
         * Returns: VM_FAULT_MINOR/MAJOR on success or one of VM_FAULT_ERROR
         *      on error
         *
         * The callback occurs whenever there is a CPU page fault or GUP on a
         * virtual address. This means that the device driver must migrate the
         * page back to regular memory (CPU accessible).
         *
         * The device driver is free to migrate more than one page from the
         * fault() callback as an optimization. However, if the device decides
         * to migrate more than one page, it must always prioritize the
         * faulting address over the others.
         *
         * The struct page pointer is only given as a hint to allow quick
         * lookup of internal device driver data. A concurrent migration
         * might have already freed that page and the virtual address might
         * no longer be backed by it. So it should not be modified by the
         * callback.
         *
         * Note that the mmap semaphore is held in read mode at least when
         * this callback occurs, hence the vma is valid upon callback entry.
         */
        int (*fault)(struct hmm_devmem *devmem,
                     struct vm_area_struct *vma,
                     unsigned long addr,
                     const struct page *page,
                     unsigned int flags,
                     pmd_t *pmdp);
};
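
/*
 * Example: a minimal sketch of a fault() callback; my_migrate_to_ram() is a
 * hypothetical driver helper (typically built on top of migrate_vma(), see
 * mm/migrate.c) that copies the device page back to system memory.
 *
 *      static int my_devmem_fault(struct hmm_devmem *devmem,
 *                                 struct vm_area_struct *vma,
 *                                 unsigned long addr,
 *                                 const struct page *page,
 *                                 unsigned int flags,
 *                                 pmd_t *pmdp)
 *      {
 *          // Migrate at least the faulting address back to regular memory;
 *          // more pages may be migrated, but addr takes priority.
 *          if (my_migrate_to_ram(devmem, vma, addr, pmdp))
 *              return VM_FAULT_SIGBUS;
 *          return VM_FAULT_MAJOR;
 *      }
 */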

/*
 * struct hmm_devmem - track device memory
 *
 * @completion: completion object for device memory
 * @pfn_first: first pfn for this resource (set by hmm_devmem_add())
 * @pfn_last: last pfn for this resource (set by hmm_devmem_add())
 * @resource: IO resource reserved for this chunk of memory
 * @pagemap: device page map for that chunk
 * @device: device to bind resource to
 * @ops: memory operations callback
 * @ref: per CPU refcount
 *
 * This is a helper structure for device drivers that do not wish to implement
 * the gory details related to hotplugging new memory and allocating struct
 * pages.
 *
 * Device drivers can directly use ZONE_DEVICE memory on their own if they
 * wish to do so.
 */
struct hmm_devmem {
        struct completion completion;
        unsigned long pfn_first;
        unsigned long pfn_last;
        struct resource *resource;
        struct device *device;
        struct dev_pagemap pagemap;
        const struct hmm_devmem_ops *ops;
        struct percpu_ref ref;
};

/*
 * To add (hotplug) device memory, HMM assumes that there is no real resource
 * that reserves a range in the physical address space (this is intended to be
 * used by unaddressable device memory). It will reserve a physical range big
 * enough and allocate struct page for it.
 *
 * The device driver can wrap the hmm_devmem struct inside a private device
 * driver struct. The device driver must call hmm_devmem_remove() before the
 * device goes away and before freeing the hmm_devmem struct memory.
 */
struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
                                  struct device *device,
                                  unsigned long size);
struct hmm_devmem *hmm_devmem_add_resource(const struct hmm_devmem_ops *ops,
                                           struct device *device,
                                           struct resource *res);
void hmm_devmem_remove(struct hmm_devmem *devmem);
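
/*
 * Example: a minimal sketch of the hotplug lifecycle. my_device_probe(),
 * my_device_remove() and my_devmem_ops are hypothetical driver code; only
 * hmm_devmem_add() and hmm_devmem_remove() come from this API.
 *
 *      static const struct hmm_devmem_ops my_devmem_ops = {
 *          .free = my_devmem_free,
 *          .fault = my_devmem_fault,
 *      };
 *
 *      static int my_device_probe(struct device *device)
 *      {
 *          struct hmm_devmem *devmem;
 *
 *          devmem = hmm_devmem_add(&my_devmem_ops, device, SZ_1G);
 *          if (IS_ERR(devmem))
 *              return PTR_ERR(devmem);
 *
 *          // Device pages now span [devmem->pfn_first, devmem->pfn_last]
 *          ...
 *          return 0;
 *      }
 *
 *      static void my_device_remove(struct hmm_devmem *devmem)
 *      {
 *          // Must run before the device goes away and before freeing devmem
 *          hmm_devmem_remove(devmem);
 *      }
 */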

/*
 * hmm_devmem_page_set_drvdata - set per-page driver data field
 *
 * @page: pointer to struct page
 * @data: driver data value to set
 *
 * Because the page cannot be on an lru list, we have an unsigned long that
 * the driver can use to store a per-page field. This is just a simple helper
 * for doing that.
 */
static inline void hmm_devmem_page_set_drvdata(struct page *page,
                                               unsigned long data)
{
        unsigned long *drvdata = (unsigned long *)&page->pgmap;

        drvdata[1] = data;
}

/*
 * hmm_devmem_page_get_drvdata - get per-page driver data field
 *
 * @page: pointer to struct page
 * Return: driver data value
 */
static inline unsigned long hmm_devmem_page_get_drvdata(const struct page *page)
{
        const unsigned long *drvdata = (const unsigned long *)&page->pgmap;

        return drvdata[1];
}
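
/*
 * Example: a minimal sketch pairing the two helpers above; struct
 * my_page_private is hypothetical per-page driver data.
 *
 *      static void my_track_page(struct page *page, struct my_page_private *priv)
 *      {
 *          hmm_devmem_page_set_drvdata(page, (unsigned long)priv);
 *      }
 *
 *      static struct my_page_private *my_page_priv(const struct page *page)
 *      {
 *          return (struct my_page_private *)hmm_devmem_page_get_drvdata(page);
 *      }
 */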


/*
 * struct hmm_device - fake device to hang device memory onto
 *
 * @device: device struct
 * @minor: device minor number
 */
struct hmm_device {
        struct device device;
        unsigned int minor;
};

/*
 * A device driver that wants to handle memory for multiple devices through a
 * single fake device can use hmm_device to do so. This is purely a helper;
 * it is not strictly needed in order to make use of any HMM functionality.
 */
struct hmm_device *hmm_device_new(void *drvdata);
void hmm_device_put(struct hmm_device *hmm_device);
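
/*
 * Example: a minimal sketch of using the fake device helper; my_drvdata and
 * the follow-up hmm_devmem_add() call site are hypothetical.
 *
 *      struct hmm_device *hmm_device;
 *
 *      hmm_device = hmm_device_new(my_drvdata);
 *      if (IS_ERR(hmm_device))
 *          return PTR_ERR(hmm_device);
 *      // Hang device memory off the fake device, e.g.:
 *      // devmem = hmm_devmem_add(&my_devmem_ops, &hmm_device->device, size);
 *      ...
 *      hmm_device_put(hmm_device);
 */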
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */

/* Below are for HMM internal use only! Not to be used by device drivers! */
void hmm_mm_destroy(struct mm_struct *mm);

static inline void hmm_mm_init(struct mm_struct *mm)
{
        mm->hmm = NULL;
}
#else /* IS_ENABLED(CONFIG_HMM) */
static inline void hmm_mm_destroy(struct mm_struct *mm) {}
static inline void hmm_mm_init(struct mm_struct *mm) {}
#endif /* IS_ENABLED(CONFIG_HMM) */
#endif /* LINUX_HMM_H */