/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
        struct list_head list;
        unsigned long start;
        unsigned long size;
};

static LIST_HEAD(mem_segs);

static void __ref *vmem_alloc_pages(unsigned int order)
{
        unsigned long size = PAGE_SIZE << order;

        if (slab_is_available())
                return (void *)__get_free_pages(GFP_KERNEL, order);
        return (void *) memblock_alloc(size, size);
}
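
/*
 * vmem_alloc_pages() has to work both before and after the buddy
 * allocator is up: during early boot the pages come from memblock,
 * later from __get_free_pages(). The __ref annotation permits the
 * reference to init-time allocator code without a section mismatch
 * warning.
 */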

void *vmem_crst_alloc(unsigned long val)
{
        unsigned long *table;

        table = vmem_alloc_pages(CRST_ALLOC_ORDER);
        if (table)
                crst_table_init(table, val);
        return table;
}
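
/*
 * Illustrative usage (mirrors the call sites below): callers pass the
 * empty-entry pattern for the table level they need, e.g.
 *
 *        p4d_table = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
 *        pud_table = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
 *        pmd_table = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
 */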

pte_t __ref *vmem_pte_alloc(void)
{
        unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
        pte_t *pte;

        if (slab_is_available())
                pte = (pte_t *) page_table_alloc(&init_mm);
        else
                pte = (pte_t *) memblock_alloc(size, size);
        if (!pte)
                return NULL;
        clear_table((unsigned long *) pte, _PAGE_INVALID, size);
        return pte;
}
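
/*
 * Page tables for the identity mapping come from the per-mm page table
 * cache once slab is available and from memblock before that; freshly
 * allocated tables have every entry marked _PAGE_INVALID.
 */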

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
        unsigned long pgt_prot, sgt_prot, r3_prot;
        unsigned long pages4k, pages1m, pages2g;
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        int ret = -ENOMEM;

        pgt_prot = pgprot_val(PAGE_KERNEL);
        sgt_prot = pgprot_val(SEGMENT_KERNEL);
        r3_prot = pgprot_val(REGION3_KERNEL);
        if (!MACHINE_HAS_NX) {
                pgt_prot &= ~_PAGE_NOEXEC;
                sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
                r3_prot &= ~_REGION_ENTRY_NOEXEC;
        }
        pages4k = pages1m = pages2g = 0;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
                        if (!p4_dir)
                                goto out;
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }
                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
                        if (!pu_dir)
                                goto out;
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }
                pu_dir = pud_offset(p4_dir, address);
                if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
                    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
                    !debug_pagealloc_enabled()) {
                        pud_val(*pu_dir) = address | r3_prot;
                        address += PUD_SIZE;
                        pages2g++;
                        continue;
                }
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        if (!pm_dir)
                                goto out;
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }
                pm_dir = pmd_offset(pu_dir, address);
                if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
                    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
                    !debug_pagealloc_enabled()) {
                        pmd_val(*pm_dir) = address | sgt_prot;
                        address += PMD_SIZE;
                        pages1m++;
                        continue;
                }
                if (pmd_none(*pm_dir)) {
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
                                goto out;
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_val(*pt_dir) = address | pgt_prot;
                address += PAGE_SIZE;
                pages4k++;
        }
        ret = 0;
out:
        update_page_count(PG_DIRECT_MAP_4K, pages4k);
        update_page_count(PG_DIRECT_MAP_1M, pages1m);
        update_page_count(PG_DIRECT_MAP_2G, pages2g);
        return ret;
}
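
/*
 * Worked example (illustrative; assumes EDAT2 and EDAT1 are available
 * and debug_pagealloc is off): adding a 2 GB aligned range of
 * 2 GB + 3 MB + 8 KB installs one 2 GB region-3 entry, three 1 MB
 * segment entries and two 4 KB ptes, so the counters above end up as
 * pages2g = 1, pages1m = 3, pages4k = 2.
 */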

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
        unsigned long pages4k, pages1m, pages2g;
        unsigned long end = start + size;
        unsigned long address = start;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;

        pages4k = pages1m = pages2g = 0;
        while (address < end) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        address += PGDIR_SIZE;
                        continue;
                }
                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        address += P4D_SIZE;
                        continue;
                }
                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        address += PUD_SIZE;
                        continue;
                }
                if (pud_large(*pu_dir)) {
                        pud_clear(pu_dir);
                        address += PUD_SIZE;
                        pages2g++;
                        continue;
                }
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        address += PMD_SIZE;
                        continue;
                }
                if (pmd_large(*pm_dir)) {
                        pmd_clear(pm_dir);
                        address += PMD_SIZE;
                        pages1m++;
                        continue;
                }
                pt_dir = pte_offset_kernel(pm_dir, address);
                pte_clear(&init_mm, address, pt_dir);
                address += PAGE_SIZE;
                pages4k++;
        }
        flush_tlb_kernel_range(start, end);
        update_page_count(PG_DIRECT_MAP_4K, -pages4k);
        update_page_count(PG_DIRECT_MAP_1M, -pages1m);
        update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}
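
/*
 * Large mappings are taken down by clearing the covering segment or
 * region entry; the page tables themselves are never freed here. The
 * TLB is flushed once for the whole range rather than per entry.
 */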

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long pgt_prot, sgt_prot;
        unsigned long address = start;
        pgd_t *pg_dir;
        p4d_t *p4_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
        pte_t *pt_dir;
        int ret = -ENOMEM;

        pgt_prot = pgprot_val(PAGE_KERNEL);
        sgt_prot = pgprot_val(SEGMENT_KERNEL);
        if (!MACHINE_HAS_NX) {
                pgt_prot &= ~_PAGE_NOEXEC;
                sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
        }
        for (address = start; address < end;) {
                pg_dir = pgd_offset_k(address);
                if (pgd_none(*pg_dir)) {
                        p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
                        if (!p4_dir)
                                goto out;
                        pgd_populate(&init_mm, pg_dir, p4_dir);
                }

                p4_dir = p4d_offset(pg_dir, address);
                if (p4d_none(*p4_dir)) {
                        pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
                        if (!pu_dir)
                                goto out;
                        p4d_populate(&init_mm, p4_dir, pu_dir);
                }

                pu_dir = pud_offset(p4_dir, address);
                if (pud_none(*pu_dir)) {
                        pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
                        if (!pm_dir)
                                goto out;
                        pud_populate(&init_mm, pu_dir, pm_dir);
                }

                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir)) {
                        /*
                         * Use 1 MB frames for the vmemmap if EDAT1 is
                         * available. A large frame is used even if it is
                         * only partially needed; otherwise additional
                         * page tables would be required as well, since
                         * vmemmap_populate() is called separately for
                         * each memory section.
                         */
                        if (MACHINE_HAS_EDAT1) {
                                void *new_page;

                                new_page = vmemmap_alloc_block(PMD_SIZE, node);
                                if (!new_page)
                                        goto out;
                                pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
                                address = (address + PMD_SIZE) & PMD_MASK;
                                continue;
                        }
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
                                goto out;
                        pmd_populate(&init_mm, pm_dir, pt_dir);
                } else if (pmd_large(*pm_dir)) {
                        address = (address + PMD_SIZE) & PMD_MASK;
                        continue;
                }

                pt_dir = pte_offset_kernel(pm_dir, address);
                if (pte_none(*pt_dir)) {
                        void *new_page;

                        new_page = vmemmap_alloc_block(PAGE_SIZE, node);
                        if (!new_page)
                                goto out;
                        pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
                }
                address += PAGE_SIZE;
        }
        ret = 0;
out:
        return ret;
}
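
/*
 * Since vmemmap_populate() is called separately for each memory
 * section, a 1 MB frame installed for an earlier section is detected
 * by the pmd_large() check above and simply skipped on later calls.
 */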

void vmemmap_free(unsigned long start, unsigned long end)
{
}
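
/*
 * Freeing vmemmap backing store is intentionally a no-op here; once
 * populated, the range stays mapped.
 */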

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
        struct memory_segment *tmp;

        if (seg->start + seg->size > VMEM_MAX_PHYS ||
            seg->start + seg->size < seg->start)
                return -ERANGE;

        list_for_each_entry(tmp, &mem_segs, list) {
                if (seg->start >= tmp->start + tmp->size)
                        continue;
                if (seg->start + seg->size <= tmp->start)
                        continue;
                return -ENOSPC;
        }
        list_add(&seg->list, &mem_segs);
        return 0;
}
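
/*
 * Illustrative: with an existing segment [0x10000000, 0x20000000),
 * inserting [0x20000000, 0x30000000) succeeds, while the overlapping
 * [0x1ff00000, 0x2ff00000) is rejected with -ENOSPC. Segments that
 * wrap around or extend beyond VMEM_MAX_PHYS fail early with -ERANGE.
 */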

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
        list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
        remove_memory_segment(seg);
        vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);

        ret = -ENOENT;
        list_for_each_entry(seg, &mem_segs, list) {
                if (seg->start == start && seg->size == size)
                        break;
        }

        if (seg->start != start || seg->size != size)
                goto out;

        ret = 0;
        __remove_shared_memory(seg);
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}
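
/*
 * The re-check of start and size after the loop detects the "no match"
 * case: if the loop terminates without hitting the break, seg does not
 * point at a valid segment and ret stays -ENOENT.
 */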

int vmem_add_mapping(unsigned long start, unsigned long size)
{
        struct memory_segment *seg;
        int ret;

        mutex_lock(&vmem_mutex);
        ret = -ENOMEM;
        seg = kzalloc(sizeof(*seg), GFP_KERNEL);
        if (!seg)
                goto out;
        seg->start = start;
        seg->size = size;

        ret = insert_memory_segment(seg);
        if (ret)
                goto out_free;

        ret = vmem_add_mem(start, size);
        if (ret)
                goto out_remove;
        goto out;

out_remove:
        __remove_shared_memory(seg);
out_free:
        kfree(seg);
out:
        mutex_unlock(&vmem_mutex);
        return ret;
}
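
/*
 * Illustrative usage (hypothetical caller, error handling elided):
 * code that attaches extra storage, such as a DCSS segment driver, is
 * expected to pair the two calls:
 *
 *        rc = vmem_add_mapping(seg_start, seg_size);
 *        if (rc)
 *                return rc;
 *        ...
 *        vmem_remove_mapping(seg_start, seg_size);
 */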

/*
 * Map the whole physical memory to virtual memory (identity mapping).
 * Enough space is reserved in the vmalloc area for the vmemmap so that
 * additional memory segments can be hotplugged.
 */
void __init vmem_map_init(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                vmem_add_mem(reg->base, reg->size);
        __set_memory((unsigned long) _stext,
                     (_etext - _stext) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
        __set_memory((unsigned long) _etext,
                     (_eshared - _etext) >> PAGE_SHIFT,
                     SET_MEMORY_RO);
        __set_memory((unsigned long) _sinittext,
                     (_einittext - _sinittext) >> PAGE_SHIFT,
                     SET_MEMORY_RO | SET_MEMORY_X);
        pr_info("Write protected kernel read-only data: %luk\n",
                (_eshared - _stext) >> 10);
}
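
/*
 * After the identity mapping is set up, kernel text and init text are
 * made read-only and executable, and the section between _etext and
 * _eshared read-only; the message reports the write-protected range
 * in KB.
 */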

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
        struct memblock_region *reg;
        struct memory_segment *seg;

        mutex_lock(&vmem_mutex);
        for_each_memblock(memory, reg) {
                seg = kzalloc(sizeof(*seg), GFP_KERNEL);
                if (!seg)
                        panic("Out of memory...\n");
                seg->start = reg->base;
                seg->size = reg->size;
                insert_memory_segment(seg);
        }
        mutex_unlock(&vmem_mutex);
        return 0;
}

core_initcall(vmem_convert_memory_chunk);