// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

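/*
 * Fault handler for the special vDSO mapping: pages are served on
 * demand straight from the prebuilt pagelists. The compat check is
 * what lets a 31-bit process fault in the 32-bit vDSO image instead
 * of the 64-bit one.
 */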
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

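/*
 * Special mapping descriptor for the vDSO vma. The mremap hook keeps
 * context.vdso_base in sync when user space moves the mapping (e.g.
 * during checkpoint/restore), so the kernel can still find the vDSO
 * afterwards.
 */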
static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);
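/*
 * Example: booting with "vdso=off" or "vdso=0" on the kernel command
 * line disables the vDSO for all processes; "vdso=on", "vdso=1" or any
 * other non-zero numeric value enables it (the default).
 */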

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31);
}

/*
 * Allocate/free per cpu vdso data.
 */
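/*
 * An order-2 allocation is four pages (16KB), the size of a full
 * segment table with _CRST_ENTRIES eight-byte entries.
 */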
#define SEGMENT_ORDER	2

/*
 * The initial vdso_data structure for the boot CPU. Eventually
 * it is replaced with a properly allocated structure in vdso_init.
 * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
 * pointer is required to be able to return from an interrupt or
 * program check. See the exit paths in entry.S.
 */
struct vdso_data boot_vdso_data __initdata;

void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
{
	lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
}

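/*
 * Each CPU gets a small private address space for the vDSO: a segment
 * table whose first entry points to a single page table, whose first
 * entry in turn maps the per-cpu data page read-only (_PAGE_PROTECT).
 */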
int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up page table for the vdso address space */
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	lowcore->vdso_asce = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;

	segment_table = lowcore->vdso_asce & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked elf binaries.
	 */
	if (!uses_interp)
		return 0;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	mm->context.compat_mm = is_compat_task();
	if (mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif
	/*
	 * If the vDSO has a problem and was disabled, just don't
	 * "enable" it for the process.
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * Pick a base address for the vDSO in process space. We try to
	 * put it at vdso_base, which is the "natural" base for it, but
	 * we might fail and end up putting it elsewhere.
	 */
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process
	 * isn't allowed to write to those pages. gdb can break that via
	 * the ptrace interface and thus trigger COW on those pages, but
	 * it is then your responsibility never to do that on the "data"
	 * page of the vDSO, or you'll stop getting kernel updates and
	 * your nice userland gettimeofday will be totally dead. It's
	 * fine to use that for setting breakpoints in the vDSO code
	 * pages, though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

static int __init vdso_init(void)
{
	int i;

	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT_VDSO
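	/*
	 * Both vDSO images are followed by one extra page mapping the
	 * shared vdso_data, hence the "+ 1" in the page counts below
	 * and the data page stored as the last real entry of each
	 * pagelist.
	 */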
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);