// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/bootmem_info.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of PTEs that have been walked.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or that the range will be remapped from.
Muchun Song | f41f2ed | 2021-06-30 18:47:13 -0700 | [diff] [blame] | 46 | */ |
| 47 | struct vmemmap_remap_walk { |
| 48 | void (*remap_pte)(pte_t *pte, unsigned long addr, |
| 49 | struct vmemmap_remap_walk *walk); |
Muchun Song | 3bc2b6a | 2021-06-30 18:48:22 -0700 | [diff] [blame] | 50 | unsigned long nr_walked; |
Muchun Song | f41f2ed | 2021-06-30 18:47:13 -0700 | [diff] [blame] | 51 | struct page *reuse_page; |
| 52 | unsigned long reuse_addr; |
| 53 | struct list_head *vmemmap_pages; |
| 54 | }; |
| 55 | |
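/*
 * Split a vmemmap huge PMD mapping into a table of base-page PTEs that map
 * the same pages, so that individual vmemmap pages can be remapped or freed
 * later. The new page table is made visible before the PMD entry is updated,
 * and the old huge mapping is flushed from the TLB afterwards.
 */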
static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start,
				  struct vmemmap_remap_walk *walk)
{
	pmd_t __pmd;
	int i;
	unsigned long addr = start;
	struct page *page = pmd_page(*pmd);
	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);

	if (!pgtable)
		return -ENOMEM;

	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

	for (i = 0; i < PMD_SIZE / PAGE_SIZE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(page + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	/* Make pte visible before pmd. See comment in __pte_alloc(). */
	smp_wmb();
	pmd_populate_kernel(&init_mm, pmd, pgtable);

	flush_tlb_kernel_range(start, start + PMD_SIZE);

	return 0;
}

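/*
 * Walk the PTEs of one PMD. The first PTE encountered in the whole walk
 * supplies @walk->reuse_page and is skipped; every other entry is handed
 * to @walk->remap_pte.
 */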
static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in the page table walk, before
	 * we start remapping (i.e. before @walk->remap_pte is called).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}

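/*
 * Walk the PMDs of one PUD range. A leaf (huge) PMD is split into base-page
 * PTEs first so that the PTE level can always be walked.
 */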
static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		if (pmd_leaf(*pmd)) {
			int ret;

			ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK, walk);
			if (ret)
				return ret;
		}
		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}

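/*
 * Walk the kernel page tables for the vmemmap range [@start, @end) and apply
 * @walk->remap_pte to every PTE, then flush the TLB for the part of the range
 * whose mapping actually changed.
 */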
static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
	VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	/*
	 * We only change the mapping of the vmemmap virtual address range
	 * [@start + PAGE_SIZE, @end), so we only need to flush the TLB for
	 * that range.
	 */
	flush_tlb_kernel_range(start + PAGE_SIZE, end);

	return 0;
}

/*
 * Free a vmemmap page. A vmemmap page can be allocated either from the
 * memblock allocator or from the buddy allocator. If the PG_reserved flag
 * is set, the page was allocated from the memblock allocator, so free it
 * via free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}

/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		free_vmemmap_page(page);
	}
}

static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	pte_t entry = mk_pte(walk->reuse_page, pgprot);
	struct page *page = pte_page(*pte);

	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}

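/*
 * Restore one vmemmap page: take a page off @walk->vmemmap_pages, copy the
 * contents of the shared reuse page into it, and remap @addr to the new page
 * with normal (writable) kernel permissions.
 */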
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);

	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}

/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range was mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int vmemmap_remap_free(unsigned long start, unsigned long end,
		       unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/*
	 * In order to make the remapping routine as efficient as possible for
	 * huge pages, the vmemmap page table walk obeys the following rules
	 * (see vmemmap_pte_range() for more details):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   must be contiguous.
	 * - The @reuse address must be part of the range [@reuse, @end) that is
	 *   passed to vmemmap_remap_range().
	 * - The @reuse address must be the first address in that range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	mmap_write_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	mmap_write_downgrade(&init_mm);

	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed.  These
		 * are pages which were removed from the vmemmap.
		 * They will be restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}

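/*
 * Allocate one page for every vmemmap page in [@start, @end), on the node of
 * the memory that this part of the vmemmap describes. On failure the pages
 * allocated so far are freed and -ENOMEM is returned.
 */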
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}

/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, @end)
 *			 so that each page in the range is backed by a freshly
 *			 allocated vmemmap page again.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flag for allocating vmemmap pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use that; otherwise fall back to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* Callers need to use a consistent size during the early boot stage. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

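/* First pfn of the altmap reservation that has not been handed out yet. */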
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

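/*
 * Carve @size bytes out of the altmap reservation. The allocation is aligned
 * to the largest power-of-two number of pages that divides it. Returns NULL
 * when @size is not a multiple of PAGE_SIZE or the reservation is exhausted.
 */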
static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
			     __func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
		 __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

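/* Warn when the memory backing a vmemmap PTE is not local to @node. */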
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

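/*
 * Populate the vmemmap range [@start, @end) with base pages, allocating any
 * missing intermediate page table levels. The memory backing the struct pages
 * comes from @altmap when one is provided, otherwise from the regular
 * allocators.
 */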
int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

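/*
 * Create the memmap for @nr_pages pfns starting at @pfn. Both @pfn and
 * @nr_pages must be subsection aligned. Returns the first struct page of
 * the range on success, NULL on failure.
 */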
struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return pfn_to_page(pfn);
}