/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 *
 * See Documentation/vm/hmm.rst for reasons and overview of what HMM is.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/kconfig.h>
#include <asm/pgtable.h>

#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>

/*
 * hmm_pfn_flag_e - HMM flag enums
 *
 * Flags:
 * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
 * HMM_PFN_WRITE: CPU page table has write permission set
 *
 * The driver provides a flags array for mapping page protections to device
 * PTE bits. If the driver valid bit for an entry is bit 3,
 * i.e., (entry & (1 << 3)), then the driver must provide
 * an array in hmm_range.flags with hmm_range.flags[HMM_PFN_VALID] == 1 << 3.
 * The same logic applies to all flags. This is the same idea as vm_page_prot
 * in a vma, except that this is per device driver rather than per
 * architecture.
 */
enum hmm_pfn_flag_e {
	HMM_PFN_VALID = 0,
	HMM_PFN_WRITE,
	HMM_PFN_FLAG_MAX
};
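
/*
 * A minimal sketch of such a flags array, continuing the bit 3 example
 * from the comment above; putting the write permission in bit 4 is an
 * illustrative assumption, not something HMM mandates:
 */
static const uint64_t example_hmm_flags[HMM_PFN_FLAG_MAX] = {
	[HMM_PFN_VALID] = 1UL << 3,	/* hypothetical device valid bit */
	[HMM_PFN_WRITE] = 1UL << 4,	/* hypothetical device write bit */
};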

/*
 * hmm_pfn_value_e - HMM pfn special value
 *
 * Flags:
 * HMM_PFN_ERROR: corresponding CPU page table entry points to poisoned memory
 * HMM_PFN_NONE: corresponding CPU page table entry is pte_none()
 * HMM_PFN_SPECIAL: corresponding CPU page table entry is special; i.e., the
 * result of vmf_insert_pfn() or vm_insert_page(). Therefore, it should not
 * be mirrored by a device, because the entry will never have HMM_PFN_VALID
 * set and the pfn value is undefined.
 *
 * The driver provides values for the none entry, error entry, and special
 * entry. The driver can alias (i.e., use the same value for) error and
 * special, but it should not alias none with error or special.
 *
 * The HMM pfn value returned by hmm_range_fault() will be:
 * hmm_range.values[HMM_PFN_ERROR] if the CPU page table entry is poisonous,
 * hmm_range.values[HMM_PFN_NONE] if there is no CPU page table entry,
 * hmm_range.values[HMM_PFN_SPECIAL] if the CPU page table entry is special
 */
enum hmm_pfn_value_e {
	HMM_PFN_ERROR,
	HMM_PFN_NONE,
	HMM_PFN_SPECIAL,
	HMM_PFN_VALUE_MAX
};
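
/*
 * A minimal sketch of a matching values array; per the comment above,
 * error and special may alias each other while none stays distinct.
 * The concrete values are an illustrative assumption:
 */
static const uint64_t example_hmm_values[HMM_PFN_VALUE_MAX] = {
	[HMM_PFN_ERROR]	  = 1UL << 0,	/* aliased with special, allowed */
	[HMM_PFN_NONE]	  = 0,
	[HMM_PFN_SPECIAL] = 1UL << 0,
};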

/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @notifier: a mmu_interval_notifier that includes the start/end
 * @notifier_seq: result of mmu_interval_read_begin()
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @pfns: array of pfns (big enough for the range)
 * @flags: pfn flags to match device driver page table
 * @values: pfn values for special cases (none, special, error, ...)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows masking pfn flags so that only default_flags matter
 * @pfn_shift: pfn shift value (should be <= PAGE_SHIFT)
 * @dev_private_owner: owner of device private pages
 */
struct hmm_range {
	struct mmu_interval_notifier *notifier;
	unsigned long		notifier_seq;
	unsigned long		start;
	unsigned long		end;
	uint64_t		*pfns;
	const uint64_t		*flags;
	const uint64_t		*values;
	uint64_t		default_flags;
	uint64_t		pfn_flags_mask;
	uint8_t			pfn_shift;
	void			*dev_private_owner;
};
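
/*
 * A minimal initialization sketch, reusing the hypothetical
 * example_hmm_flags/example_hmm_values arrays above. The caller is
 * assumed to supply a registered notifier and a pfns buffer with one
 * entry per page in [start, end):
 */
static inline void example_hmm_range_init(struct hmm_range *range,
					  struct mmu_interval_notifier *notifier,
					  unsigned long start, unsigned long end,
					  uint64_t *pfns)
{
	range->notifier = notifier;
	range->start = start;
	range->end = end;
	range->pfns = pfns;
	range->flags = example_hmm_flags;
	range->values = example_hmm_values;
	/* Request at least read permission for every page ... */
	range->default_flags = example_hmm_flags[HMM_PFN_VALID];
	/* ... and mask per-pfn flags so only default_flags matter. */
	range->pfn_flags_mask = 0;
	range->pfn_shift = PAGE_SHIFT;
	range->dev_private_owner = NULL; /* no device private memory here */
}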

/*
 * hmm_device_entry_to_page() - return struct page pointed to by a device entry
 * @range: range used to decode the device entry value
 * @entry: device entry value to get the corresponding struct page from
 * Return: struct page pointer if the entry is valid, NULL otherwise
 *
 * If the device entry is valid (i.e., the valid flag is set) then return the
 * struct page matching the entry value. Otherwise return NULL.
 */
static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
						    uint64_t entry)
{
	if (entry == range->values[HMM_PFN_NONE])
		return NULL;
	if (entry == range->values[HMM_PFN_ERROR])
		return NULL;
	if (entry == range->values[HMM_PFN_SPECIAL])
		return NULL;
	if (!(entry & range->flags[HMM_PFN_VALID]))
		return NULL;
	return pfn_to_page(entry >> range->pfn_shift);
}
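
/*
 * A decode sketch, purely illustrative: after a successful
 * hmm_range_fault() the i-th pfns entry can be turned into a page, and
 * permission bits can be tested against the driver's flags array:
 */
static inline struct page *example_hmm_decode(const struct hmm_range *range,
					      unsigned long i, bool *writable)
{
	uint64_t entry = range->pfns[i];

	*writable = !!(entry & range->flags[HMM_PFN_WRITE]);
	/* NULL covers the none, error, and special values. */
	return hmm_device_entry_to_page(range, entry);
}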

/* Don't fault in missing PTEs, just snapshot the current state. */
#define HMM_FAULT_SNAPSHOT		(1 << 1)

/*
 * Please see Documentation/vm/hmm.rst for how to use the range API.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags);
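
/*
 * A sketch of the expected call pattern, adapted from
 * Documentation/vm/hmm.rst: snapshot the notifier sequence, fault the
 * range, then retry if the range was invalidated before the driver
 * lock was taken. It assumes the mmap_sem era of this header, and the
 * driver-private mutex stands in for whatever lock the driver's
 * invalidate callback also takes:
 */
static inline int example_hmm_fault(struct hmm_range *range,
				    struct mm_struct *mm,
				    struct mutex *driver_lock)
{
	long ret;

again:
	range->notifier_seq = mmu_interval_read_begin(range->notifier);
	down_read(&mm->mmap_sem);
	/* Pass HMM_FAULT_SNAPSHOT instead of 0 to only read current state. */
	ret = hmm_range_fault(range, 0);
	up_read(&mm->mmap_sem);
	if (ret < 0) {
		if (ret == -EBUSY)
			goto again;
		return ret;
	}

	mutex_lock(driver_lock);
	if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
		mutex_unlock(driver_lock);
		goto again;
	}
	/* range->pfns is stable here; update the device page table. */
	mutex_unlock(driver_lock);
	return 0;
}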

/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms, i.e. 1s, already sounds like a long
 * time to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000

#endif /* LINUX_HMM_H */