// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

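/*
 * Private state shared across the page table walk callbacks: @range is the
 * caller's hmm_range being filled in, and @last is the last address handled,
 * used by hmm_range_fault() to restart the walk after -EBUSY.
 */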
struct hmm_vma_walk {
	struct hmm_range *range;
	unsigned long last;
};

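/*
 * Fault flags returned by hmm_pte_need_fault() and hmm_range_need_fault()
 * and consumed by hmm_vma_fault(): whether a CPU fault is needed at all,
 * and whether it must be a write fault.
 */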
enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the range. The API can be used in two
	 * ways: in the first, the HMM user coalesces multiple page faults
	 * into one request and sets flags per pfn for those faults; in the
	 * second, the HMM user wants to pre-fault a range with specific
	 * flags. For the latter it would be a waste to have the user
	 * pre-fill the pfn array with a default flags value.
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

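	/*
	 * Illustrative example (not from the original source): a caller that
	 * wants to pre-fault an entire range writable can set
	 *	range->default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
	 *	range->pfn_flags_mask = 0;
	 * so that the per-pfn input values are ignored entirely.
	 */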
	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write-fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

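/*
 * pte_hole callback: called for any range with no populated page table
 * entries. hmm_vma_walk_pmd() also calls this directly with depth == -1
 * when it finds an empty pmd.
 */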
static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

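/*
 * Device private entries are only reported by PFN when their pgmap owner
 * matches the caller's dev_private_owner; any other non-present entry is
 * faulted, waited on (migration), or reported as an error by
 * hmm_vma_handle_pte() below.
 */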
static inline bool hmm_is_device_private_entry(struct hmm_range *range,
					       swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			cpu_flags = HMM_PFN_VALID;
			if (is_write_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = device_private_entry_to_pfn(entry) |
					cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here: even if some other
		 * thread is splitting the huge pmd, we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value, check again that it is a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. none, migration,
	 * huge or transparent huge. At this point it is either a valid pmd
	 * entry pointing to a pte directory, or a bad pmd that will not
	 * recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

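/* Wire the HMM callbacks above into the generic page table walker. */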
static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry = hmm_vma_walk_pud,
	.pmd_entry = hmm_vma_walk_pmd,
	.pte_hole = hmm_vma_walk_hole,
	.hugetlb_entry = hmm_vma_walk_hugetlb_entry,
	.test_walk = hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:  Invalid arguments or mm or virtual address is in an invalid vma
 *           (e.g., device file vma).
 * -ENOMEM:  Out of memory.
 * -EPERM:   Invalid permission (e.g., asking for write and range is read
 *           only).
 * -EBUSY:   The range has been invalidated and the caller needs to wait for
 *           the invalidation to finish.
 * -EFAULT:  A page was requested to be valid and could not be made valid,
 *           i.e. it has no backing VMA or it is illegal to access it.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid, force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
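
/*
 * Illustrative caller pattern (a sketch, not part of this file; driver_lock
 * is a hypothetical driver-side lock). hmm_range_fault() is meant to be used
 * together with a struct mmu_interval_notifier so that a concurrent
 * invalidation forces a retry:
 *
 *	while (true) {
 *		range.notifier_seq = mmu_interval_read_begin(range.notifier);
 *		mmap_read_lock(mm);
 *		ret = hmm_range_fault(&range);
 *		mmap_read_unlock(mm);
 *		if (ret) {
 *			if (ret == -EBUSY)
 *				continue;
 *			return ret;
 *		}
 *		mutex_lock(&driver_lock);
 *		if (mmu_interval_read_retry(range.notifier,
 *					    range.notifier_seq)) {
 *			mutex_unlock(&driver_lock);
 *			continue;
 *		}
 *		break;
 *	}
 *	... use range.hmm_pfns[] and drop driver_lock when done ...
 */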