// SPDX-License-Identifier: GPL-2.0+
/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>

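/*
 * Maximum random offset, in bytes, that may be applied on top of the
 * process stack. This is 0 if randomization is switched off globally
 * (PF_RANDOMIZE cleared) or per process (ADDR_NO_RANDOMIZE
 * personality), otherwise STACK_RND_MASK pages, e.g. up to ~8 MB with
 * the common 0x7ff mask and 4 KB pages.
 */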
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (current->personality & ADDR_NO_RANDOMIZE)
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

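/*
 * Use the legacy bottom-up layout if the ADDR_COMPAT_LAYOUT
 * personality or the legacy_va_layout sysctl demands it, or if the
 * stack rlimit is infinite: with unlimited stack growth there is no
 * safe place below the stack for the top-down mmap base.
 */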
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

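/*
 * Random offset, in bytes, added to the mmap base when address space
 * layout randomization is enabled: a random number of pages, limited
 * by MMAP_RND_MASK.
 */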
unsigned long arch_mmap_rnd(void)
{
	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}

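/*
 * Base for the legacy bottom-up layout: TASK_UNMAPPED_BASE plus the
 * random offset; mappings grow upwards from here towards the stack.
 */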
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

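/*
 * Base for the top-down layout, just below the stack area: take the
 * stack rlimit, pad it by the worst-case stack randomization plus the
 * guard gap, clamp the resulting gap to [32 MB, 5/6 of STACK_TOP] and
 * subtract gap and random offset from STACK_TOP. E.g. an 8 MB stack
 * rlimit stays below the 32 MB minimum even after padding (assuming
 * the default guard gap and randomization limits), so the mmap area
 * would start at roughly STACK_TOP - 32 MB - rnd.
 */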
static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~32 MB hole.
	 */
	gap_min = 32 * 1024 * 1024UL;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

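/*
 * Bottom-up search, used by the legacy layout: honour MAP_FIXED and
 * usable address hints directly, otherwise let vm_unmapped_area()
 * search upwards from mm->mmap_base to TASK_SIZE.
 */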
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	int rc;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		/* use the hint if it points at a large enough hole */
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	/* file and shared mappings get extra, pgoff-based alignment */
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (addr & ~PAGE_MASK)
		return addr;

check_asce_limit:
	/*
	 * If the mapping would end above the current page table limit,
	 * try to upgrade the page tables to cover the larger address
	 * space before giving up.
	 */
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

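/*
 * Top-down search, used by the flexible layout: search downwards from
 * mm->mmap_base, with a bottom-up fallback between TASK_UNMAPPED_BASE
 * and TASK_SIZE (see the comment below). The ASCE limit check at the
 * end works as in arch_get_unmapped_area() above.
 */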
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;
	int rc;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	if (filp || (flags & MAP_SHARED))
		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
	else
		info.align_mask = 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (addr & ~PAGE_MASK)
			return addr;
	}

check_asce_limit:
	if (addr + len > current->mm->context.asce_limit &&
	    addr + len <= TASK_SIZE) {
		rc = crst_table_upgrade(mm, addr + len);
		if (rc)
			return (unsigned long) rc;
	}

	return addr;
}

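/*
 * Two layouts are available: the legacy layout grows the mmap area
 * upwards from TASK_UNMAPPED_BASE, which limits mmap space but leaves
 * maximum room for the heap; the flexible layout puts the mmap base
 * just below the (rlimit-bounded) stack and grows downwards, so mmap
 * space is bounded only by the heap.
 */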
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}