// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range *range;
	struct dev_pagemap *pgmap;
	unsigned long last;
	unsigned int flags;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	if (!vma)
		goto err;

	if (hmm_vma_walk->flags & HMM_FAULT_ALLOW_RETRY)
		flags |= FAULT_FLAG_ALLOW_RETRY;
	if (write_fault)
		flags |= FAULT_FLAG_WRITE;

	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY) {
		/* Note, handle_mm_fault did up_read(&mm->mmap_sem) */
		return -EAGAIN;
	}
	if (ret & VM_FAULT_ERROR)
		goto err;

	return -EBUSY;

err:
	*pfn = range->values[HMM_PFN_ERROR];
	return -EFAULT;
}

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
		struct hmm_range *range, enum hmm_pfn_value_e value)
{
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[value];

	return 0;
}

/*
 * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: is this a write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	hmm_vma_walk->last = addr;
	i = (addr - range->start) >> PAGE_SHIFT;

	if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))
		return -EPERM;

	for (; addr < end; addr += PAGE_SIZE, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
		return;

	/*
	 * We not only consider the individual per-page request, we also
	 * consider the default flags requested for the range. The API can
	 * be used two ways. The first one is where the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * those faults. The second one is where the HMM user wants to
	 * pre-fault a range with specific flags. For the latter it would be
	 * a waste to have the user pre-fill the pfn array with a default
	 * flags value. (See the caller-side sketch after this function.)
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If the CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Do we need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
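
/*
 * Illustrative caller-side sketch of the two usage modes described above.
 * This is only a sketch: it assumes the hmm_range fields used in this file
 * (flags[], default_flags, pfn_flags_mask, pfns[]), and "device_flags" is a
 * hypothetical per-page flag array a driver might keep.
 *
 *	// Mode 1: coalesced device faults, flags set per pfn.
 *	range.default_flags = 0;
 *	range.pfn_flags_mask = -1ULL;
 *	for (i = 0; i < npages; i++)
 *		range.pfns[i] = device_flags[i];
 *
 *	// Mode 2: pre-fault the whole range for write; no need to
 *	// pre-fill range.pfns[] with a default flags value.
 *	range.default_flags = range.flags[HMM_PFN_VALID] |
 *			      range.flags[HMM_PFN_WRITE];
 *	range.pfn_flags_mask = 0;
 */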

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, uint64_t *pfns, pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			cpu_flags = pte_to_hmm_pfn_flags(range, pte);
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry: ignore migration entries,
		 * handle device-private entries, and report anything else
		 * as an error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(walk->mm, pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap)) {
			pte_unmap(ptep);
			return -EBUSY;
		}
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		if (!is_zero_pfn(pte_pfn(pte))) {
			pte_unmap(ptep);
			*pfn = range->values[HMM_PFN_SPECIAL];
			return -EFAULT;
		}
		/*
		 * Since each architecture defines a struct page for the zero
		 * page, just fall through and treat it like a normal page.
		 */
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
	} else if (!pmd_present(pmd))
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here: even if some other
		 * thread is splitting the huge pmd, we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping, then compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. none, migration,
	 * huge or transparent huge. At this point the pmd is either a valid
	 * entry pointing to a pte directory or a bad pmd that will not
	 * recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization,
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole_(addr, end, fault, write_fault,
						  walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap)) {
				ret = -EBUSY;
				goto out_unlock;
			}
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	/*
	 * Skip vma ranges that don't have struct page backing them or map I/O
	 * devices directly.
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 */
	if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) ||
	    !(vma->vm_flags & VM_READ)) {
		bool fault, write_fault;

		/*
		 * Check to see if a fault is requested for any page in the
		 * range.
		 */
		hmm_range_need_fault(hmm_vma_walk, range->pfns +
					((start - range->start) >> PAGE_SHIFT),
				     (end - start) >> PAGE_SHIFT,
				     0, &fault, &write_fault);
		if (fault || write_fault)
			return -EFAULT;

		hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
		hmm_vma_walk->last = end;

		/* Skip this vma and continue processing the next vma. */
		return 1;
	}

	return 0;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry = hmm_vma_walk_pud,
	.pmd_entry = hmm_vma_walk_pmd,
	.pte_hole = hmm_vma_walk_hole,
	.hugetlb_entry = hmm_vma_walk_hugetlb_entry,
	.test_walk = hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some addresses in a virtual address range
 * @range: range being faulted
 * @flags: HMM_FAULT_* flags
 *
 * Return: the number of valid pages in range->pfns[] (from range start
 * address), which may be zero. On error one of the following status codes
 * can be returned:
 *
 * -EINVAL: Invalid arguments or mm or virtual address is in an invalid vma
 *          (e.g., device file vma).
 * -ENOMEM: Out of memory.
 * -EPERM:  Invalid permission (e.g., asking for write and range is read
 *          only).
 * -EAGAIN: A page fault needs to be retried and mmap_sem was dropped.
 * -EBUSY:  The range has been invalidated and the caller needs to wait for
 *          the invalidation to finish.
 * -EFAULT: The range is invalid (i.e., either there is no valid vma or it
 *          is illegal to access that range).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, unsigned int flags)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
		.flags = flags,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	lockdep_assert_held(&mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
	} while (ret == -EBUSY);

	if (ret)
		return ret;
	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
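
/*
 * Usage sketch: the retry protocol a driver might follow around
 * hmm_range_fault() with an mmu_interval_notifier. This is only a sketch;
 * "interval_sub" and "driver_lock" are hypothetical driver-side names, and
 * flags of 0 (rather than HMM_FAULT_SNAPSHOT) requests actual faulting.
 *
 *	long ret;
 *
 * again:
 *	range.notifier_seq = mmu_interval_read_begin(&interval_sub);
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, 0);
 *	up_read(&mm->mmap_sem);
 *	if (ret < 0) {
 *		if (ret == -EBUSY)
 *			goto again;	// range was invalidated, retry
 *		return ret;
 *	}
 *
 *	mutex_lock(&driver_lock);
 *	if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
 *		mutex_unlock(&driver_lock);
 *		goto again;
 *	}
 *	// range.pfns[] is now stable until the next invalidation;
 *	// program the device page tables under driver_lock.
 *	mutex_unlock(&driver_lock);
 */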