// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

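/*
 * Start/end markers of the vDSO images come from the vDSO linker
 * scripts; the page lists back the special mapping for each variant.
 */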
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

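/*
 * Fault handler for the vDSO special mapping: hand out the page that
 * backs the faulting offset of the mapped vDSO image.
 */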
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

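/*
 * Called via the special mapping's ->mremap hook when user space moves
 * the vDSO; keep the cached base address in the mm context up to date.
 */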
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

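/* Handle the "vdso=" kernel command line option. */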
static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Set up the vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
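	/* Facility bit 31: the extract-CPU-time (ECTG) instruction is available */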
	vd->ectg_available = test_facility(31);
}

/*
 * Allocate/free per cpu vdso data.
 */
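/* Order 2: a full segment table takes four pages (2048 entries of 8 bytes). */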
#define SEGMENT_ORDER 2

/*
 * The initial vdso_data structure for the boot CPU. Eventually
 * it is replaced with a properly allocated structure in vdso_init.
 * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
 * pointer is required to be able to return from an interrupt or
 * program check. See the exit paths in entry.S.
 */
struct vdso_data boot_vdso_data __initdata;

void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
{
	lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
}

int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up page table for the vdso address space */
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

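	/* Link segment table -> page table; the pte maps the data page read-only */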
	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	lowcore->vdso_asce = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}

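/* Undo vdso_alloc_per_cpu(): walk from the ASCE down to the page frame. */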
void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;

	segment_table = lowcore->vdso_asce & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked ELF binaries.
	 */
	if (!uses_interp)
		return 0;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
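	/* Cache the compat state; vdso_fault() and vdso_mremap() rely on it. */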
	mm->context.compat_mm = is_compat_task();
	if (mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif
	/*
	 * The vDSO has a problem and was disabled; just don't map it for
	 * this process.
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * Pick a base address for the vDSO in the process address space;
	 * we let get_unmapped_area() choose it, so the vDSO may end up
	 * anywhere in the mmap area.
	 */
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Our VMA flags don't include VM_WRITE, so by default the process
	 * is not allowed to write to these pages. gdb can break that via
	 * the ptrace interface and thus trigger COW on these pages, but it
	 * is then your responsibility never to do that on the "data" page
	 * of the vDSO, or you will stop getting kernel updates and your
	 * nice userland gettimeofday() will be totally dead. Using it to
	 * set breakpoints in the vDSO code pages is fine, though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}

static int __init vdso_init(void)
{
	int i;

	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT_VDSO
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
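	/* The trailing "+ 1" reserves a slot for the shared vdso data page. */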

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;
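	/* Again, the trailing "+ 1" reserves a slot for the shared data page. */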

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();

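	/* Take an extra reference on the shared data page */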
	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);